// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
  __ Str(x1, MemOperand(jssp, x5));
  __ Push(x1);
  __ Push(x2);
  __ Add(x0, x0, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           x0.Is(descriptor.GetRegisterParameter(param_count - 1)));

    // Push arguments
    MacroAssembler::PushPopQueue queue(masm);
    for (int i = 0; i < param_count; ++i) {
      queue.Queue(descriptor.GetRegisterParameter(i));
    }
    queue.PushQueued();
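    // (A note on intent: the queue batches the parameter registers so that
    // PushQueued() can emit them with as few instructions as possible,
    // typically paired stores, while preserving the order in which they were
    // queued. See MacroAssembler::PushPopQueue for the exact behaviour.)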

    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label done;
  Register input = source();
  Register result = destination();
  DCHECK(is_truncating());

  DCHECK(result.Is64Bits());
  DCHECK(jssp.Is(masm->StackPointer()));

  int double_offset = offset();

  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
  Register scratch2 =
      GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);

  __ Push(scratch1, scratch2);
  // Account for saved regs if input is jssp.
  if (input.is(jssp)) double_offset += 2 * kPointerSize;

  if (!skip_fastpath()) {
    __ Push(double_scratch);
    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
    __ Ldr(double_scratch, MemOperand(input, double_offset));
    // Try to convert with a FPU convert instruction.  This handles all
    // non-saturating cases.
    __ TryConvertDoubleToInt64(result, double_scratch, &done);
    __ Fmov(result, double_scratch);
  } else {
    __ Ldr(result, MemOperand(input, double_offset));
  }

  // If we reach here we need to manually convert the input to an int32.

  // Extract the exponent.
  Register exponent = scratch1;
  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
          HeapNumber::kExponentBits);

  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0,
  // since the mantissa gets shifted completely out of the int32_t result.
  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
  __ CzeroX(result, ge);
  __ B(ge, &done);

  // The Fcvtzs sequence handles all cases except where the conversion causes
  // signed overflow in the int64_t target. Since we've already handled
  // exponents >= 84, we can guarantee that 63 <= exponent < 84.

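  // Illustrative sketch only, not generated code: assuming the IEEE-754
  // double layout used by HeapNumber (kExponentBias == 1023,
  // kMantissaBits == 52), the manual path below is roughly this C logic on
  // the raw bits already loaded into 'result':
  //
  //   int biased_exp = (bits >> 52) & 0x7ff;
  //   if (biased_exp >= 1023 + 52 + 32) return 0;  // mantissa shifted out
  //   int64_t mantissa = (bits & ((1LL << 52) - 1)) | (1LL << 52);
  //   if (bits < 0) mantissa = -mantissa;
  //   return mantissa << (biased_exp - (1023 + 52));  // shift is in [11, 32)
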
  if (masm->emit_debug_code()) {
    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
    // Exponents less than this should have been handled by the Fcvt case.
    __ Check(ge, kUnexpectedValue);
  }

  // Isolate the mantissa bits, and set the implicit '1'.
  Register mantissa = scratch2;
  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);

  // Negate the mantissa if necessary.
  __ Tst(result, kXSignMask);
  __ Cneg(mantissa, mantissa, ne);

  // Shift the mantissa bits into the correct place. We know that we have to
  // shift left here, because exponent >= 63 >= kMantissaBits.
  __ Sub(exponent, exponent,
         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
  __ Lsl(result, mantissa, exponent);

  __ Bind(&done);
  if (!skip_fastpath()) {
    __ Pop(double_scratch);
  }
  __ Pop(scratch2, scratch1);
  __ Ret();
}


// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
                                          Register right, Register scratch,
                                          FPRegister double_scratch,
                                          Label* slow, Condition cond) {
  DCHECK(!AreAliased(left, right, scratch));
  Label not_identical, return_equal, heap_number;
  Register result = x0;

  __ Cmp(right, left);
  __ B(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
  // so we do the second best thing: test it ourselves. The operands are
  // identical, and since they are not both smis, neither of them can be a
  // smi. If it's not a heap number, then return equal.
  Register right_type = scratch;
  if ((cond == lt) || (cond == gt)) {
    // Call runtime on identical JSObjects.  Otherwise return equal.
    __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
                        slow, ge);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Cmp(right_type, SYMBOL_TYPE);
    __ B(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Cmp(right_type, SIMD128_VALUE_TYPE);
    __ B(eq, slow);
  } else if (cond == eq) {
    __ JumpIfHeapNumber(right, &heap_number);
  } else {
    __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
                        &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
    __ B(ge, slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Cmp(right_type, SYMBOL_TYPE);
    __ B(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Cmp(right_type, SIMD128_VALUE_TYPE);
    __ B(eq, slow);
    // Normally here we fall through to return_equal, but undefined is
    // special: (undefined == undefined) == true, but
    // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
    if ((cond == le) || (cond == ge)) {
      __ Cmp(right_type, ODDBALL_TYPE);
      __ B(ne, &return_equal);
      __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
      if (cond == le) {
        // undefined <= undefined should fail.
        __ Mov(result, GREATER);
      } else {
        // undefined >= undefined should fail.
        __ Mov(result, LESS);
      }
      __ Ret();
    }
  }

  __ Bind(&return_equal);
  if (cond == lt) {
    __ Mov(result, GREATER);  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ Mov(result, LESS);     // Things aren't greater than themselves.
  } else {
    __ Mov(result, EQUAL);    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // Cases lt and gt have been handled earlier, and case ne is never seen, as
  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
  // only concerned with cases ge, le and eq here.
  if ((cond != lt) && (cond != gt)) {
    DCHECK((cond == ge) || (cond == le) || (cond == eq));
    __ Bind(&heap_number);
    // Left and right are identical pointers to a heap number object. Return
    // non-equal if the heap number is a NaN, and equal otherwise. Comparing
    // the number to itself will set the overflow flag iff the number is NaN.
    __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
    __ Fcmp(double_scratch, double_scratch);
    __ B(vc, &return_equal);  // Not NaN, so treat as normal heap number.

    if (cond == le) {
      __ Mov(result, GREATER);
    } else {
      __ Mov(result, LESS);
    }
    __ Ret();
  }

  // No fall through here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&not_identical);
}


// See call site for description.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register left,
                                           Register right,
                                           Register left_type,
                                           Register right_type,
                                           Register scratch) {
  DCHECK(!AreAliased(left, right, left_type, right_type, scratch));

  if (masm->emit_debug_code()) {
    // We assume that the arguments are not identical.
    __ Cmp(left, right);
    __ Assert(ne, kExpectedNonIdenticalObjects);
  }

  // If either operand is a JS object or an oddball value, then they are not
  // equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label right_non_object;

  __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, &right_non_object);

  // Return non-zero - x0 already contains a non-zero pointer.
  DCHECK(left.is(x0) || right.is(x0));
  Label return_not_equal;
  __ Bind(&return_not_equal);
  __ Ret();

  __ Bind(&right_non_object);

  // Check for oddballs: true, false, null, undefined.
  __ Cmp(right_type, ODDBALL_TYPE);

  // If right is not ODDBALL, test left. Otherwise, set eq condition.
  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);

  // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
  // Otherwise, right or left is ODDBALL, so set a ge condition.
  __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);

  __ B(ge, &return_not_equal);
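  // The Cmp/Ccmp chain above folds the following logic into a single
  // conditional branch (illustrative C only). Each Ccmp either performs its
  // comparison, if the input condition holds, or force-sets the flags to the
  // given nzcv value (ZFlag => eq, NVFlag => ge):
  //
  //   if ((right_type == ODDBALL_TYPE) || (left_type == ODDBALL_TYPE) ||
  //       (left_type >= FIRST_JS_RECEIVER_TYPE)) {
  //     goto return_not_equal;
  //   }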

  // Internalized strings are unique, so they can only be equal if they are the
  // same object. We have already tested that case, so if left and right are
  // both internalized strings, they cannot be equal.
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  __ Orr(scratch, left_type, right_type);
  __ TestAndBranchIfAllClear(
      scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
}


// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register left,
                                    Register right,
                                    FPRegister left_d,
                                    FPRegister right_d,
                                    Label* slow,
                                    bool strict) {
  DCHECK(!AreAliased(left_d, right_d));
  DCHECK((left.is(x0) && right.is(x1)) ||
         (right.is(x0) && left.is(x1)));
  Register result = x0;

  Label right_is_smi, done;
  __ JumpIfSmi(right, &right_is_smi);

  // Left is the smi. Check whether right is a heap number.
  if (strict) {
    // If right is not a number and left is a smi, then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfHeapNumber(right, &is_heap_number);
    // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!right.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotHeapNumber(right, slow);
  }

  // Left is the smi. Right is a heap number. Load right value into right_d, and
  // convert left smi into double in left_d.
  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(left_d, left);
  __ B(&done);

  __ Bind(&right_is_smi);
  // Right is a smi. Check whether the non-smi left is a heap number.
  if (strict) {
    // If left is not a number and right is a smi then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfHeapNumber(left, &is_heap_number);
    // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!left.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotHeapNumber(left, slow);
  }

  // Right is the smi. Left is a heap number. Load left value into left_d, and
  // convert right smi into double in right_d.
  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(right_d, right);

  // Fall through to both_loaded_as_doubles.
  __ Bind(&done);
}


// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(
    MacroAssembler* masm, Register left, Register right, Register left_map,
    Register right_map, Register left_type, Register right_type,
    Label* possible_strings, Label* runtime_call) {
  DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
  Register result = x0;
  DCHECK(left.is(x0) || right.is(x0));

  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // TODO(all): reexamine this branch sequence for optimisation wrt branch
  // prediction.
  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call);
  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
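  // Equivalent branch logic in plain C (illustrative only; each Tbnz tests
  // the single bit that MaskToBit derives from the mask):
  //   if (right_type & kIsNotStringMask)       goto object_test;
  //   if (right_type & kIsNotInternalizedMask) goto possible_strings;
  //   if (left_type & kIsNotStringMask)        goto runtime_call;
  //   if (left_type & kIsNotInternalizedMask)  goto possible_strings;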

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in x0.
  __ Ret();

  __ Bind(&object_test);

  Register left_bitfield = left_type;
  Register right_bitfield = right_type;
  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
  __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable);
  __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);

  __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, runtime_call);
  __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
  __ B(lt, runtime_call);

  __ Bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in x0.
  __ Ret();

  __ Bind(&undetectable);
  __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
  __ B(eq, &return_equal);
  __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
  __ B(ne, &return_unequal);

  __ Bind(&return_equal);
  __ Mov(result, EQUAL);
  __ Ret();
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ JumpIfNotHeapNumber(input, fail);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ Bind(&ok);
}


void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = x1;
  Register rhs = x0;
  Register result = x0;
  Condition cond = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;
  Label not_two_smis, smi_done;
  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
  __ SmiUntag(lhs);
  __ Sub(result, lhs, Operand::UntagSmi(rhs));
  __ Ret();

  __ Bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so it is
  // certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);

  // If either is a smi (we know that at least one is not a smi), then they can
  // only be strictly equal if the other is a HeapNumber.
  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);

  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
  // can:
  //  1) Return the answer.
  //  2) Branch to the slow case.
  //  3) Fall through to both_loaded_as_doubles.
  // In case 3, we have found out that we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded, right into
  // rhs_d, left into lhs_d.
  FPRegister rhs_d = d0;
  FPRegister lhs_d = d1;
  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());

  __ Bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in rhs_d and
  // lhs_d.
  Label nan;
  __ Fcmp(lhs_d, rhs_d);
  __ B(vs, &nan);  // Overflow flag set if either is NaN.
  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
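  // How the Cset/Csinv pair encodes the flags into {-1, 0, 1} (sketch):
  //   lt: Cset -> 0, Csinv (ge false) -> ~xzr = -1  (LESS)
  //   eq: Cset -> 0, Csinv (ge true)  -> 0          (EQUAL)
  //   gt: Cset -> 1, Csinv (ge true)  -> 1          (GREATER)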
  __ Ret();

  __ Bind(&nan);
  // Left and/or right is a NaN. Load the result register with whatever makes
  // the comparison fail, since comparisons with NaN always fail (except ne,
  // which is filtered out at a higher level.)
  DCHECK(cond != ne);
  if ((cond == lt) || (cond == le)) {
    __ Mov(result, GREATER);
  } else {
    __ Mov(result, LESS);
  }
  __ Ret();

  __ Bind(&not_smis);
  // At this point we know we are dealing with two different objects, and
  // neither of them is a smi. The objects are in rhs_ and lhs_.

  // Load the maps and types of the objects.
  Register rhs_map = x10;
  Register rhs_type = x11;
  Register lhs_map = x12;
  Register lhs_type = x13;
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));

  if (strict()) {
    // This emits a non-equal return sequence for some object types, or falls
    // through if it was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap number comparison. Branch to earlier double comparison code
  // if they are heap numbers, otherwise, branch to internalized string check.
  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
  __ B(ne, &check_for_internalized_strings);
  __ Cmp(lhs_map, rhs_map);

  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
  // string check.
  __ B(ne, &flat_string_check);

  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
  // comparison code.
  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ B(&both_loaded_as_doubles);

  __ Bind(&check_for_internalized_strings);
  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
  // of internalized strings.
  if ((cond == eq) && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise branches to the string case or not both strings case.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
                                             lhs_type, rhs_type,
                                             &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ Bind(&flat_string_check);
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
                                                    x15, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
                      x11);
  if (cond == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
                                                  x12);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
                                                    x12, x13);
  }

  // Never fall through to here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&slow);

  if (cond == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(x1, Heap::kTrueValueRootIndex);
    __ Sub(x0, x0, x1);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if ((cond == lt) || (cond == le)) {
      ncr = GREATER;
    } else {
      DCHECK((cond == gt) || (cond == ge));  // remaining cases
      ncr = LESS;
    }
    __ Mov(x10, Smi::FromInt(ncr));
    __ Push(x10);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ Bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  CPURegList saved_regs = kCallerSaved;
  CPURegList saved_fp_regs = kCallerSavedFP;

  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.

  // We don't care if MacroAssembler scratch registers are corrupted.
  saved_regs.Remove(*(masm->TmpList()));
  saved_fp_regs.Remove(*(masm->FPTmpList()));

  __ PushCPURegList(saved_regs);
  if (save_doubles()) {
    __ PushCPURegList(saved_fp_regs);
  }

  AllowExternalCallThatCantCauseGC scope(masm);
  __ Mov(x0, ExternalReference::isolate_address(isolate()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);

  if (save_doubles()) {
    __ PopCPURegList(saved_fp_regs);
  }
  __ PopCPURegList(saved_regs);
  __ Ret();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
  Register return_address = temps.AcquireX();
  __ Mov(return_address, lr);
  // Restore lr with the value it had before the call to this stub (the value
  // which must be pushed).
  __ Mov(lr, saved_lr);
  __ PushSafepointRegisters();
  __ Ret(return_address);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register return_address = temps.AcquireX();
  // Preserve the return address (lr will be clobbered by the pop).
  __ Mov(return_address, lr);
  __ PopSafepointRegisters();
  __ Ret(return_address);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  // Stack on entry:
  // jssp[0]: Exponent (as a tagged value).
  // jssp[1]: Base (as a tagged value).
  //
  // The (tagged) result will be returned in x0, as a heap number.

  Register result_tagged = x0;
  Register base_tagged = x10;
  Register exponent_tagged = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent_tagged.is(x11));
  Register exponent_integer = MathPowIntegerDescriptor::exponent();
  DCHECK(exponent_integer.is(x12));
  Register scratch1 = x14;
  Register scratch0 = x15;
  Register saved_lr = x19;
  FPRegister result_double = d0;
  FPRegister base_double = d0;
  FPRegister exponent_double = d1;
  FPRegister base_double_copy = d2;
  FPRegister scratch1_double = d6;
  FPRegister scratch0_double = d7;

  // A fast-path for integer exponents.
  Label exponent_is_smi, exponent_is_integer;
  // Bail out to runtime.
  Label call_runtime;
  // Allocate a heap number for the result, and return it.
  Label done;

  // Unpack the inputs.
  if (exponent_type() == ON_STACK) {
    Label base_is_smi;
    Label unpack_exponent;

    __ Pop(exponent_tagged, base_tagged);

    __ JumpIfSmi(base_tagged, &base_is_smi);
    __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
    // base_tagged is a heap number, so load its double value.
    __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
    __ B(&unpack_exponent);
    __ Bind(&base_is_smi);
    // base_tagged is a SMI, so untag it and convert it to a double.
    __ SmiUntagToDouble(base_double, base_tagged);

    __ Bind(&unpack_exponent);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
    //  d0    base_double       The base as a double.
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
    // exponent_tagged is a heap number, so load its double value.
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  }

  // Handle double (heap number) exponents.
  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as doubles and handle those in the
    // integer fast-path.
    __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
                                 scratch0_double, &exponent_is_integer);

    if (exponent_type() == ON_STACK) {
      FPRegister  half_double = d3;
      FPRegister  minus_half_double = d4;
      // Detect square root case. Crankshaft detects constant +/-0.5 at compile
      // time and uses DoMathPowHalf instead. We then skip this check for
      // non-constant cases of +/-0.5 as these hardly occur.

      __ Fmov(minus_half_double, -0.5);
      __ Fmov(half_double, 0.5);
      __ Fcmp(minus_half_double, exponent_double);
      __ Fccmp(half_double, exponent_double, NZFlag, ne);
      // Condition flags at this point:
      //    0.5:  nZCv    // Identified by eq && pl
      //   -0.5:  NZcv    // Identified by eq && mi
      //  other:  ?z??    // Identified by ne
      __ B(ne, &call_runtime);

      // The exponent is 0.5 or -0.5.

      // Given that exponent is known to be either 0.5 or -0.5, the following
      // special cases could apply (according to ECMA-262 15.8.2.13):
      //
      //  base.isNaN():                   The result is NaN.
      //  (base == +INFINITY) || (base == -INFINITY)
      //    exponent == 0.5:              The result is +INFINITY.
      //    exponent == -0.5:             The result is +0.
      //  (base == +0) || (base == -0)
      //    exponent == 0.5:              The result is +0.
      //    exponent == -0.5:             The result is +INFINITY.
      //  (base < 0) && base.isFinite():  The result is NaN.
      //
      // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
      // where base is -INFINITY or -0.

      // Add +0 to base. This has no effect other than turning -0 into +0.
      __ Fadd(base_double, base_double, fp_zero);
      // The operation -0+0 results in +0 in all cases except where the
      // FPCR rounding mode is 'round towards minus infinity' (RM). The
      // ARM64 simulator does not currently simulate FPCR (where the rounding
      // mode is set), so test the operation with some debug code.
      if (masm->emit_debug_code()) {
        UseScratchRegisterScope temps(masm);
        Register temp = temps.AcquireX();
        __ Fneg(scratch0_double, fp_zero);
        // Verify that we correctly generated +0.0 and -0.0.
        //  bits(+0.0) = 0x0000000000000000
        //  bits(-0.0) = 0x8000000000000000
        __ Fmov(temp, fp_zero);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
        __ Fmov(temp, scratch0_double);
        __ Eor(temp, temp, kDSignMask);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
        // Check that -0.0 + 0.0 == +0.0.
        __ Fadd(scratch0_double, scratch0_double, fp_zero);
        __ Fmov(temp, scratch0_double);
        __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
      }

      // If base is -INFINITY, make it +INFINITY.
      //  * Calculate base - base: All infinities will become NaNs since both
      //    -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
      //  * If the result is NaN, calculate abs(base).
      __ Fsub(scratch0_double, base_double, base_double);
      __ Fcmp(scratch0_double, 0.0);
      __ Fabs(scratch1_double, base_double);
      __ Fcsel(base_double, scratch1_double, base_double, vs);

      // Calculate the square root of base.
      __ Fsqrt(result_double, base_double);
      __ Fcmp(exponent_double, 0.0);
      __ B(ge, &done);  // Finish now for exponents of 0.5.
      // Find the inverse for exponents of -0.5.
      __ Fmov(scratch0_double, 1.0);
      __ Fdiv(result_double, scratch0_double, result_double);
      __ B(&done);
    }

    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ Mov(saved_lr, lr);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
      __ Mov(lr, saved_lr);
      __ B(&done);
    }

    // Handle SMI exponents.
    __ Bind(&exponent_is_smi);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
    //  d0    base_double       The base as a double.
    __ SmiUntag(exponent_integer, exponent_tagged);
  }

  __ Bind(&exponent_is_integer);
  //  x10   base_tagged       The tagged base (input).
  //  x11   exponent_tagged   The tagged exponent (input).
  //  x12   exponent_integer  The exponent as an integer.
  //  d0    base_double       The base as a double.

  // Find abs(exponent). For negative exponents, we can find the inverse later.
  Register exponent_abs = x13;
  __ Cmp(exponent_integer, 0);
  __ Cneg(exponent_abs, exponent_integer, mi);
  //  x13   exponent_abs      The value of abs(exponent_integer).

  // Repeatedly multiply to calculate the power.
  //  result = 1.0;
  //  For each bit n (exponent_integer{n}) {
  //    if (exponent_integer{n}) {
  //      result *= base;
  //    }
  //    base *= base;
  //    if (remaining bits in exponent_integer are all zero) {
  //      break;
  //    }
  //  }
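  // Worked example (illustrative): for exponent_abs == 13 (0b1101), the loop
  // multiplies result_double by base^1, base^4 and base^8 (bits 0, 2 and 3),
  // yielding base^13, and squares scratch1_double once per bit examined.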
  Label power_loop, power_loop_entry, power_loop_exit;
  __ Fmov(scratch1_double, base_double);
  __ Fmov(base_double_copy, base_double);
  __ Fmov(result_double, 1.0);
  __ B(&power_loop_entry);

  __ Bind(&power_loop);
  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
  __ Lsr(exponent_abs, exponent_abs, 1);
  __ Cbz(exponent_abs, &power_loop_exit);

  __ Bind(&power_loop_entry);
  __ Tbz(exponent_abs, 0, &power_loop);
  __ Fmul(result_double, result_double, scratch1_double);
  __ B(&power_loop);

  __ Bind(&power_loop_exit);

  // If the exponent was positive, result_double holds the result.
  __ Tbz(exponent_integer, kXSignBit, &done);

  // The exponent was negative, so find the inverse.
  __ Fmov(scratch0_double, 1.0);
  __ Fdiv(result_double, scratch0_double, result_double);
  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
  // to calculate the subnormal value 2^-1074. This method of calculating
  // negative powers doesn't work because 2^1074 overflows to infinity. To
  // catch this corner-case, we bail out if the result was 0. (This can only
  // occur if the divisor is infinity or the base is zero.)
  __ Fcmp(result_double, 0.0);
  __ B(&done, ne);

  if (exponent_type() == ON_STACK) {
    // Bail out to runtime code.
    __ Bind(&call_runtime);
    // Put the arguments back on the stack.
    __ Push(base_tagged, exponent_tagged);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // Return.
    __ Bind(&done);
    __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
                          result_double);
    DCHECK(result_tagged.is(x0));
    __ Ret();
  } else {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ Mov(saved_lr, lr);
    __ Fmov(base_double, base_double_copy);
    __ Scvtf(exponent_double, exponent_integer);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()),
        0, 2);
    __ Mov(lr, saved_lr);
    __ Bind(&done);
    __ Ret();
  }
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  // It is important that the following stubs are generated in this order
  // because pregenerated stubs can only call other pregenerated stubs.
  // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
  // CEntryStub.
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Floating-point code doesn't get special handling in ARM64, so there's
  // nothing to do here.
  USE(isolate);
}


bool CEntryStub::NeedsImmovableCode() {
  // CEntryStub stores the return address on the stack before calling into
  // C++ code. In some cases, the VM accesses this address, but it is not used
  // when the C++ code returns to the stub because LR holds the return address
  // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
  // returning to dead code.
  // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
  // find any comment to confirm this, and I don't hit any crashes whatever
  // this function returns. The analysis should be properly confirmed.
  return true;
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
  CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
  stub_fp.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // The Abort mechanism relies on CallRuntime, which in turn relies on
  // CEntryStub, so until this stub has been generated, we have to use a
  // fall-back Abort mechanism.
  //
  // Note that this stub must be generated before any use of Abort.
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);

  ASM_LOCATION("CEntryStub::Generate entry");
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Register parameters:
  //    x0: argc (including receiver, untagged)
  //    x1: target
  // If argv_in_register():
  //    x11: argv (pointer to first argument)
  //
  // The stack on entry holds the arguments and the receiver, with the receiver
  // at the highest address:
  //
  //    jssp[argc-1]: receiver
  //    jssp[argc-2]: arg[argc-2]
  //    ...           ...
  //    jssp[1]:      arg[1]
  //    jssp[0]:      arg[0]
  //
  // The arguments are in reverse order, so that arg[argc-2] is actually the
  // first argument to the target function and arg[0] is the last.
  DCHECK(jssp.Is(__ StackPointer()));
  const Register& argc_input = x0;
  const Register& target_input = x1;

  // Calculate argv, argc and the target address, and store them in
  // callee-saved registers so we can retry the call without having to reload
  // these arguments.
  // TODO(jbramley): If the first call attempt succeeds in the common case (as
  // it should), then we might be better off putting these parameters directly
  // into their argument registers, rather than using callee-saved registers and
  // preserving them on the stack.
  const Register& argv = x21;
  const Register& argc = x22;
  const Register& target = x23;

  // Derive argv from the stack pointer so that it points to the first argument
  // (arg[argc-2]), or just below the receiver in case there are no arguments.
  //  - Adjust for the arg[] array.
  Register temp_argv = x11;
  if (!argv_in_register()) {
    __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
    //  - Adjust for the receiver.
    __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
  }

  // Reserve three slots to preserve x21-x23 callee-saved registers. If the
  // result size is too large to be returned in registers then also reserve
  // space for the return value.
  int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
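  // (For example, an ObjectTriple result has result_size() == 3, so six slots
  // are reserved: three to preserve x21-x23 and three for the return value,
  // which is read back from the stack after the call below.)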
  // Enter the exit frame.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), x10, extra_stack_space);
  DCHECK(csp.Is(__ StackPointer()));

  // Poke callee-saved registers into reserved space.
  __ Poke(argv, 1 * kPointerSize);
  __ Poke(argc, 2 * kPointerSize);
  __ Poke(target, 3 * kPointerSize);

  if (result_size() > 2) {
    // Save the location of the return value into x8 for call.
    __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
  }

  // We normally only keep tagged values in callee-saved registers, as they
  // could be pushed onto the stack by called stubs and functions, and on the
  // stack they can confuse the GC. However, we're only calling C functions
  // which can push arbitrary data onto the stack anyway, and so the GC won't
  // examine that part of the stack.
  __ Mov(argc, argc_input);
  __ Mov(target, target_input);
  __ Mov(argv, temp_argv);

  // x21 : argv
  // x22 : argc
  // x23 : call target
  //
  // The stack (on entry) holds the arguments and the receiver, with the
  // receiver at the highest address:
  //
  //         argv[8]:     receiver
  // argv -> argv[0]:     arg[argc-2]
  //         ...          ...
  //         argv[...]:   arg[1]
  //         argv[...]:   arg[0]
  //
  // Immediately below (after) this is the exit frame, as constructed by
  // EnterExitFrame:
  //         fp[8]:    CallerPC (lr)
  //   fp -> fp[0]:    CallerFP (old fp)
  //         fp[-8]:   Space reserved for SPOffset.
  //         fp[-16]:  CodeObject()
  //         csp[...]: Saved doubles, if saved_doubles is true.
  //         csp[32]:  Alignment padding, if necessary.
  //         csp[24]:  Preserved x23 (used for target).
  //         csp[16]:  Preserved x22 (used for argc).
  //         csp[8]:   Preserved x21 (used for argv).
  //  csp -> csp[0]:   Space reserved for the return address.
  //
  // After a successful call, the exit frame, preserved registers (x21-x23) and
  // the arguments (including the receiver) are dropped or popped as
  // appropriate. The stub then returns.
  //
  // After an unsuccessful call, the exit frame and suchlike are left
  // untouched, and the stub throws an exception by jumping to the
  // exception_returned label.

  DCHECK(csp.Is(__ StackPointer()));

  // Prepare AAPCS64 arguments to pass to the builtin.
  __ Mov(x0, argc);
  __ Mov(x1, argv);
  __ Mov(x2, ExternalReference::isolate_address(isolate()));

  Label return_location;
  __ Adr(x12, &return_location);
  __ Poke(x12, 0);

  if (__ emit_debug_code()) {
    // Verify that the slot just below the address stored at fp[kSPOffset]
    // holds the return location (currently in x12).
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
    __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
    __ Cmp(temp, x12);
    __ Check(eq, kReturnAddressNotFoundInFrame);
  }

  // Call the builtin.
  __ Blr(target);
  __ Bind(&return_location);

  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
    __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
    __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
  }
  // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!

  //  x0    result0      The return code from the call.
  //  x1    result1      For calls which return ObjectPair or ObjectTriple.
  //  x2    result2      For calls which return ObjectTriple.
  //  x21   argv
  //  x22   argc
  //  x23   target
  const Register& result = x0;

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(result, Heap::kExceptionRootIndex);
  __ B(eq, &exception_returned);

  // The call succeeded, so unwind the stack and return.

  // Restore callee-saved registers x21-x23.
  __ Mov(x11, argc);

  __ Peek(argv, 1 * kPointerSize);
  __ Peek(argc, 2 * kPointerSize);
  __ Peek(target, 3 * kPointerSize);

  __ LeaveExitFrame(save_doubles(), x10, true);
  DCHECK(jssp.Is(__ StackPointer()));
  if (!argv_in_register()) {
    // Drop the remaining stack slots and return from the stub.
    __ Drop(x11);
  }
  __ AssertFPCRState();
  __ Ret();

  // The stack pointer is still csp if we aren't returning, and the frame
  // hasn't changed (except for the return address).
  __ SetStackPointer(csp);

  // Handling of exception.
  __ Bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set x0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  DCHECK(csp.Is(masm->StackPointer()));
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ Mov(x0, 0);  // argc.
    __ Mov(x1, 0);  // argv.
    __ Mov(x2, ExternalReference::isolate_address(isolate()));
    __ CallCFunction(find_handler, 3);
  }

  // We didn't execute a return case, so the stack frame hasn't been updated
  // (except for the return address slot). However, we don't need to initialize
  // jssp because the throw method will immediately overwrite it when it
  // unwinds the stack.
  __ SetStackPointer(jssp);

  // Retrieve the handler context, SP and FP.
  __ Mov(cp, Operand(pending_handler_context_address));
  __ Ldr(cp, MemOperand(cp));
  __ Mov(jssp, Operand(pending_handler_sp_address));
  __ Ldr(jssp, MemOperand(jssp));
  __ Mov(fp, Operand(pending_handler_fp_address));
  __ Ldr(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label not_js_frame;
  __ Cbz(cp, &not_js_frame);
  __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Bind(&not_js_frame);

  // Compute the handler entry address and jump to it.
  __ Mov(x10, Operand(pending_handler_code_address));
  __ Ldr(x10, MemOperand(x10));
  __ Mov(x11, Operand(pending_handler_offset_address));
  __ Ldr(x11, MemOperand(x11));
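  // At this point x10 holds the (tagged) Code object for the handler and x11
  // the handler's offset within it; the Adds below compute the untagged
  // instruction start plus that offset, i.e. the handler's entry pc.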
   1232   __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
   1233   __ Add(x10, x10, x11);
   1234   __ Br(x10);
   1235 }
   1236 
   1237 
   1238 // This is the entry point from C++. 5 arguments are provided in x0-x4.
   1239 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
   1240 // Input:
   1241 //   x0: code entry.
   1242 //   x1: function.
   1243 //   x2: receiver.
   1244 //   x3: argc.
   1245 //   x4: argv.
   1246 // Output:
   1247 //   x0: result.
   1248 void JSEntryStub::Generate(MacroAssembler* masm) {
   1249   DCHECK(jssp.Is(__ StackPointer()));
   1250   Register code_entry = x0;
   1251 
   1252   // Enable instruction instrumentation. This only works on the simulator, and
   1253   // will have no effect on the model or real hardware.
   1254   __ EnableInstrumentation();
   1255 
   1256   Label invoke, handler_entry, exit;
   1257 
   1258   // Push callee-saved registers and synchronize the system stack pointer (csp)
   1259   // and the JavaScript stack pointer (jssp).
   1260   //
   1261   // We must not write to jssp until after the PushCalleeSavedRegisters()
   1262   // call, since jssp is itself a callee-saved register.
   1263   __ SetStackPointer(csp);
   1264   __ PushCalleeSavedRegisters();
   1265   __ Mov(jssp, csp);
   1266   __ SetStackPointer(jssp);
   1267 
   1268   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   1269 
   1270   // Set up the reserved register for 0.0.
   1271   __ Fmov(fp_zero, 0.0);
   1272 
   1273   // Build an entry frame (see layout below).
   1274   int marker = type();
   1275   int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
   1276   __ Mov(x13, bad_frame_pointer);
   1277   __ Mov(x12, Smi::FromInt(marker));
   1278   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
   1279   __ Ldr(x10, MemOperand(x11));
   1280 
   1281   __ Push(x13, x12, xzr, x10);
   1282   // Set up fp.
   1283   __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
   1284 
   1285   // Push the JS entry frame marker. Also set js_entry_sp if this is the
   1286   // outermost JS call.
   1287   Label non_outermost_js, done;
   1288   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
   1289   __ Mov(x10, ExternalReference(js_entry_sp));
   1290   __ Ldr(x11, MemOperand(x10));
   1291   __ Cbnz(x11, &non_outermost_js);
   1292   __ Str(fp, MemOperand(x10));
   1293   __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
   1294   __ Push(x12);
   1295   __ B(&done);
   1296   __ Bind(&non_outermost_js);
    1297   // We save one instruction by pushing xzr, since the marker is 0.
   1298   DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
   1299   __ Push(xzr);
   1300   __ Bind(&done);
   1301 
   1302   // The frame set up looks like this:
   1303   // jssp[0] : JS entry frame marker.
   1304   // jssp[1] : C entry FP.
    1305   // jssp[2] : stack frame marker (0, from xzr).
    1306   // jssp[3] : stack frame marker (the type() smi).
   1307   // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
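             // (For reference: jssp[4]..jssp[1] come from the Push(x13, x12,
             // xzr, x10) above, so the bad frame pointer is pushed first and
             // the C entry FP last; the JS entry frame marker at jssp[0] was
             // pushed by the outermost-JS check above.)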
   1308 
   1309 
   1310   // Jump to a faked try block that does the invoke, with a faked catch
   1311   // block that sets the pending exception.
   1312   __ B(&invoke);
   1313 
   1314   // Prevent the constant pool from being emitted between the record of the
   1315   // handler_entry position and the first instruction of the sequence here.
   1316   // There is no risk because Assembler::Emit() emits the instruction before
   1317   // checking for constant pool emission, but we do not want to depend on
   1318   // that.
   1319   {
   1320     Assembler::BlockPoolsScope block_pools(masm);
   1321     __ bind(&handler_entry);
   1322     handler_offset_ = handler_entry.pos();
   1323     // Caught exception: Store result (exception) in the pending exception
   1324     // field in the JSEnv and return a failure sentinel. Coming in here the
    1325     // fp will be invalid because the PushStackHandler below sets it to 0 to
   1326     // signal the existence of the JSEntry frame.
   1327     __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1328                                           isolate())));
   1329   }
   1330   __ Str(code_entry, MemOperand(x10));
   1331   __ LoadRoot(x0, Heap::kExceptionRootIndex);
   1332   __ B(&exit);
   1333 
   1334   // Invoke: Link this frame into the handler chain.
   1335   __ Bind(&invoke);
   1336   __ PushStackHandler();
   1337   // If an exception not caught by another handler occurs, this handler
   1338   // returns control to the code after the B(&invoke) above, which
   1339   // restores all callee-saved registers (including cp and fp) to their
   1340   // saved values before returning a failure to C.
   1341 
   1342   // Clear any pending exceptions.
   1343   __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
   1344   __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1345                                         isolate())));
   1346   __ Str(x10, MemOperand(x11));
   1347 
   1348   // Invoke the function by calling through the JS entry trampoline builtin.
   1349   // Notice that we cannot store a reference to the trampoline code directly in
   1350   // this stub, because runtime stubs are not traversed when doing GC.
   1351 
   1352   // Expected registers by Builtins::JSEntryTrampoline
   1353   // x0: code entry.
   1354   // x1: function.
   1355   // x2: receiver.
   1356   // x3: argc.
   1357   // x4: argv.
   1358   ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
   1359                               ? Builtins::kJSConstructEntryTrampoline
   1360                               : Builtins::kJSEntryTrampoline,
   1361                           isolate());
   1362   __ Mov(x10, entry);
   1363 
   1364   // Call the JSEntryTrampoline.
   1365   __ Ldr(x11, MemOperand(x10));  // Dereference the address.
   1366   __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
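             // x11 now holds the trampoline's Code object; x12 skips its
             // header (Code::kHeaderSize - kHeapObjectTag) to reach the first
             // instruction.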
   1367   __ Blr(x12);
   1368 
   1369   // Unlink this frame from the handler chain.
   1370   __ PopStackHandler();
   1371 
   1372 
   1373   __ Bind(&exit);
   1374   // x0 holds the result.
   1375   // The stack pointer points to the top of the entry frame pushed on entry from
   1376   // C++ (at the beginning of this stub):
   1377   // jssp[0] : JS entry frame marker.
   1378   // jssp[1] : C entry FP.
    1379   // jssp[2] : stack frame marker (0, from xzr).
    1380   // jssp[3] : stack frame marker (the type() smi).
   1381   // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
   1382 
   1383   // Check if the current stack frame is marked as the outermost JS frame.
   1384   Label non_outermost_js_2;
   1385   __ Pop(x10);
   1386   __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
   1387   __ B(ne, &non_outermost_js_2);
   1388   __ Mov(x11, ExternalReference(js_entry_sp));
   1389   __ Str(xzr, MemOperand(x11));
   1390   __ Bind(&non_outermost_js_2);
   1391 
   1392   // Restore the top frame descriptors from the stack.
   1393   __ Pop(x10);
   1394   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
   1395   __ Str(x10, MemOperand(x11));
   1396 
    1397   // Reset the stack to the callee-saved registers.
   1398   __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
   1399   // Restore the callee-saved registers and return.
   1400   DCHECK(jssp.Is(__ StackPointer()));
   1401   __ Mov(csp, jssp);
   1402   __ SetStackPointer(csp);
   1403   __ PopCalleeSavedRegisters();
   1404   // After this point, we must not modify jssp because it is a callee-saved
   1405   // register which we have just restored.
   1406   __ Ret();
   1407 }
   1408 
   1409 
   1410 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   1411   Label miss;
   1412   Register receiver = LoadDescriptor::ReceiverRegister();
   1413   // Ensure that the vector and slot registers won't be clobbered before
   1414   // calling the miss handler.
   1415   DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
   1416                      LoadWithVectorDescriptor::SlotRegister()));
   1417 
   1418   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
   1419                                                           x11, &miss);
   1420 
   1421   __ Bind(&miss);
   1422   PropertyAccessCompiler::TailCallBuiltin(
   1423       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
   1424 }
   1425 
   1426 
   1427 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   1428   // Return address is in lr.
   1429   Label miss;
   1430 
   1431   Register receiver = LoadDescriptor::ReceiverRegister();
   1432   Register index = LoadDescriptor::NameRegister();
   1433   Register result = x0;
   1434   Register scratch = x10;
   1435   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   1436   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
   1437          result.is(LoadWithVectorDescriptor::SlotRegister()));
   1438 
    1439   // StringCharAtGenerator doesn't use the result register until it has
    1440   // passed the different miss possibilities. If it did, we would have a
    1441   // conflict when FLAG_vector_ics is true.
   1442   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
   1443                                           &miss,  // When not a string.
   1444                                           &miss,  // When not a number.
   1445                                           &miss,  // When index out of range.
   1446                                           RECEIVER_IS_STRING);
   1447   char_at_generator.GenerateFast(masm);
   1448   __ Ret();
   1449 
   1450   StubRuntimeCallHelper call_helper;
   1451   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
   1452 
   1453   __ Bind(&miss);
   1454   PropertyAccessCompiler::TailCallBuiltin(
   1455       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
   1456 }
   1457 
   1458 
   1459 void RegExpExecStub::Generate(MacroAssembler* masm) {
   1460 #ifdef V8_INTERPRETED_REGEXP
   1461   __ TailCallRuntime(Runtime::kRegExpExec);
   1462 #else  // V8_INTERPRETED_REGEXP
   1463 
   1464   // Stack frame on entry.
   1465   //  jssp[0]: last_match_info (expected JSArray)
   1466   //  jssp[8]: previous index
   1467   //  jssp[16]: subject string
   1468   //  jssp[24]: JSRegExp object
   1469   Label runtime;
   1470 
   1471   // Use of registers for this function.
   1472 
   1473   // Variable registers:
   1474   //   x10-x13                                  used as scratch registers
   1475   //   w0       string_type                     type of subject string
   1476   //   x2       jsstring_length                 subject string length
   1477   //   x3       jsregexp_object                 JSRegExp object
   1478   //   w4       string_encoding                 Latin1 or UC16
   1479   //   w5       sliced_string_offset            if the string is a SlicedString
   1480   //                                            offset to the underlying string
   1481   //   w6       string_representation           groups attributes of the string:
   1482   //                                              - is a string
   1483   //                                              - type of the string
   1484   //                                              - is a short external string
   1485   Register string_type = w0;
   1486   Register jsstring_length = x2;
   1487   Register jsregexp_object = x3;
   1488   Register string_encoding = w4;
   1489   Register sliced_string_offset = w5;
   1490   Register string_representation = w6;
   1491 
    1492   // These are in callee-saved registers and will be preserved by the call
    1493   // to the native RegExp code, as this code is called using the normal
    1494   // C calling convention. When calling directly from generated code the
    1495   // native RegExp code will not do a GC and therefore the contents of
    1496   // these registers are safe to use after the call.
   1497 
   1498   //   x19       subject                        subject string
   1499   //   x20       regexp_data                    RegExp data (FixedArray)
   1500   //   x21       last_match_info_elements       info relative to the last match
   1501   //                                            (FixedArray)
   1502   //   x22       code_object                    generated regexp code
   1503   Register subject = x19;
   1504   Register regexp_data = x20;
   1505   Register last_match_info_elements = x21;
   1506   Register code_object = x22;
   1507 
   1508   // Stack frame.
   1509   //  jssp[00]: last_match_info (JSArray)
   1510   //  jssp[08]: previous index
   1511   //  jssp[16]: subject string
   1512   //  jssp[24]: JSRegExp object
   1513 
   1514   const int kLastMatchInfoOffset = 0 * kPointerSize;
   1515   const int kPreviousIndexOffset = 1 * kPointerSize;
   1516   const int kSubjectOffset = 2 * kPointerSize;
   1517   const int kJSRegExpOffset = 3 * kPointerSize;
   1518 
   1519   // Ensure that a RegExp stack is allocated.
   1520   ExternalReference address_of_regexp_stack_memory_address =
   1521       ExternalReference::address_of_regexp_stack_memory_address(isolate());
   1522   ExternalReference address_of_regexp_stack_memory_size =
   1523       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   1524   __ Mov(x10, address_of_regexp_stack_memory_size);
   1525   __ Ldr(x10, MemOperand(x10));
   1526   __ Cbz(x10, &runtime);
   1527 
   1528   // Check that the first argument is a JSRegExp object.
   1529   DCHECK(jssp.Is(__ StackPointer()));
   1530   __ Peek(jsregexp_object, kJSRegExpOffset);
   1531   __ JumpIfSmi(jsregexp_object, &runtime);
   1532   __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
   1533 
   1534   // Check that the RegExp has been compiled (data contains a fixed array).
   1535   __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
   1536   if (FLAG_debug_code) {
   1537     STATIC_ASSERT(kSmiTag == 0);
   1538     __ Tst(regexp_data, kSmiTagMask);
   1539     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   1540     __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
   1541     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   1542   }
   1543 
   1544   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   1545   __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   1546   __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
   1547   __ B(ne, &runtime);
   1548 
    1549   // Check that the number of captures fits in the static offsets vector buffer.
    1550   // We always have at least one capture for the whole match, plus additional
   1551   // ones due to capturing parentheses. A capture takes 2 registers.
   1552   // The number of capture registers then is (number_of_captures + 1) * 2.
   1553   __ Ldrsw(x10,
   1554            UntagSmiFieldMemOperand(regexp_data,
   1555                                    JSRegExp::kIrregexpCaptureCountOffset));
   1556   // Check (number_of_captures + 1) * 2 <= offsets vector size
   1557   //             number_of_captures * 2 <= offsets vector size - 2
   1558   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   1559   __ Add(x10, x10, x10);
   1560   __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
   1561   __ B(hi, &runtime);
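             // E.g. (illustrative): a regexp with 2 capturing parentheses
             // needs (2 + 1) * 2 = 6 offset slots; the check above requires
             // 2 * 2 <= vector size - 2, which is the same bound.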
   1562 
   1563   // Initialize offset for possibly sliced string.
   1564   __ Mov(sliced_string_offset, 0);
   1565 
   1566   DCHECK(jssp.Is(__ StackPointer()));
   1567   __ Peek(subject, kSubjectOffset);
   1568   __ JumpIfSmi(subject, &runtime);
   1569 
   1570   __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
   1571 
   1572   // Handle subject string according to its encoding and representation:
   1573   // (1) Sequential string?  If yes, go to (4).
   1574   // (2) Sequential or cons?  If not, go to (5).
   1575   // (3) Cons string.  If the string is flat, replace subject with first string
   1576   //     and go to (1). Otherwise bail out to runtime.
   1577   // (4) Sequential string.  Load regexp code according to encoding.
   1578   // (E) Carry on.
   1579   /// [...]
   1580 
   1581   // Deferred code at the end of the stub:
   1582   // (5) Long external string?  If not, go to (7).
   1583   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1584   //     Go to (4).
   1585   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1586   // (8) Sliced string.  Replace subject with parent.  Go to (1).
   1587 
   1588   Label check_underlying;   // (1)
   1589   Label seq_string;         // (4)
   1590   Label not_seq_nor_cons;   // (5)
   1591   Label external_string;    // (6)
   1592   Label not_long_external;  // (7)
   1593 
   1594   __ Bind(&check_underlying);
   1595   __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
   1596   __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   1597 
   1598   // (1) Sequential string?  If yes, go to (4).
   1599   __ And(string_representation,
   1600          string_type,
   1601          kIsNotStringMask |
   1602              kStringRepresentationMask |
   1603              kShortExternalStringMask);
   1604   // We depend on the fact that Strings of type
   1605   // SeqString and not ShortExternalString are defined
   1606   // by the following pattern:
   1607   //   string_type: 0XX0 XX00
   1608   //                ^  ^   ^^
   1609   //                |  |   ||
   1610   //                |  |   is a SeqString
   1611   //                |  is not a short external String
   1612   //                is a String
   1613   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   1614   STATIC_ASSERT(kShortExternalStringTag != 0);
   1615   __ Cbz(string_representation, &seq_string);  // Go to (4).
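             // E.g. (illustrative): for a plain sequential string all three
             // masked fields are zero, so string_representation is 0 and the
             // Cbz branches straight to (4).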
   1616 
   1617   // (2) Sequential or cons?  If not, go to (5).
   1618   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   1619   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   1620   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   1621   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   1622   __ Cmp(string_representation, kExternalStringTag);
   1623   __ B(ge, &not_seq_nor_cons);  // Go to (5).
   1624 
   1625   // (3) Cons string.  Check that it's flat.
   1626   __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
   1627   __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
   1628   // Replace subject with first string.
   1629   __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   1630   __ B(&check_underlying);
   1631 
   1632   // (4) Sequential string.  Load regexp code according to encoding.
   1633   __ Bind(&seq_string);
   1634 
   1635   // Check that the third argument is a positive smi less than the subject
   1636   // string length. A negative value will be greater (unsigned comparison).
   1637   DCHECK(jssp.Is(__ StackPointer()));
   1638   __ Peek(x10, kPreviousIndexOffset);
   1639   __ JumpIfNotSmi(x10, &runtime);
   1640   __ Cmp(jsstring_length, x10);
   1641   __ B(ls, &runtime);
   1642 
   1643   // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
   1644   // before entering the exit frame.
   1645   __ SmiUntag(x1, x10);
   1646 
   1647   // The third bit determines the string encoding in string_type.
   1648   STATIC_ASSERT(kOneByteStringTag == 0x04);
   1649   STATIC_ASSERT(kTwoByteStringTag == 0x00);
   1650   STATIC_ASSERT(kStringEncodingMask == 0x04);
   1651 
   1652   // Find the code object based on the assumptions above.
    1653   // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
    1654   // of kPointerSize to reach the latter.
   1655   STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
   1656                 JSRegExp::kDataUC16CodeOffset);
   1657   __ Mov(x10, kPointerSize);
   1658   // We will need the encoding later: Latin1 = 0x04
   1659   //                                  UC16   = 0x00
   1660   __ Ands(string_encoding, string_type, kStringEncodingMask);
   1661   __ CzeroX(x10, ne);
   1662   __ Add(x10, regexp_data, x10);
   1663   __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
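             // Branchless selection: for Latin1 the encoding bit is set (ne),
             // so CzeroX clears x10 and we load from kDataOneByteCodeOffset;
             // for UC16, x10 keeps kPointerSize, which lands on
             // kDataUC16CodeOffset.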
   1664 
   1665   // (E) Carry on.  String handling is done.
   1666 
   1667   // Check that the irregexp code has been generated for the actual string
    1668   // encoding. If it has, the field contains a code object; otherwise it
    1669   // contains a smi (code flushing support).
   1670   __ JumpIfSmi(code_object, &runtime);
   1671 
   1672   // All checks done. Now push arguments for native regexp code.
   1673   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
   1674                       x10,
   1675                       x11);
   1676 
   1677   // Isolates: note we add an additional parameter here (isolate pointer).
   1678   __ EnterExitFrame(false, x10, 1);
   1679   DCHECK(csp.Is(__ StackPointer()));
   1680 
    1681   // We have 9 arguments to pass to the regexp code, therefore we have to pass
    1682   // one on the stack and the rest in registers.
   1683 
   1684   // Note that the placement of the argument on the stack isn't standard
   1685   // AAPCS64:
   1686   // csp[0]: Space for the return address placed by DirectCEntryStub.
   1687   // csp[8]: Argument 9, the current isolate address.
   1688 
   1689   __ Mov(x10, ExternalReference::isolate_address(isolate()));
   1690   __ Poke(x10, kPointerSize);
   1691 
   1692   Register length = w11;
   1693   Register previous_index_in_bytes = w12;
   1694   Register start = x13;
   1695 
   1696   // Load start of the subject string.
   1697   __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
    1698   // Load the length of the original subject string, which is in the previous
    1699   // stack frame. Therefore we have to use fp, which points exactly two pointer
   1700   // sizes below the previous sp. (Because creating a new stack frame pushes
   1701   // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
   1702   __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   1703   __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
   1704 
   1705   // Handle UC16 encoding, two bytes make one character.
   1706   //   string_encoding: if Latin1: 0x04
   1707   //                    if UC16:   0x00
   1708   STATIC_ASSERT(kStringEncodingMask == 0x04);
   1709   __ Ubfx(string_encoding, string_encoding, 2, 1);
   1710   __ Eor(string_encoding, string_encoding, 1);
   1711   //   string_encoding: if Latin1: 0
   1712   //                    if UC16:   1
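             // string_encoding is now the character-to-byte shift: 0 for
             // Latin1 (1 byte per character), 1 for UC16 (2 bytes per
             // character), applied by the Lsl instructions below.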
   1713 
   1714   // Convert string positions from characters to bytes.
   1715   // Previous index is in x1.
   1716   __ Lsl(previous_index_in_bytes, w1, string_encoding);
   1717   __ Lsl(length, length, string_encoding);
   1718   __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
   1719 
   1720   // Argument 1 (x0): Subject string.
   1721   __ Mov(x0, subject);
   1722 
   1723   // Argument 2 (x1): Previous index, already there.
   1724 
   1725   // Argument 3 (x2): Get the start of input.
   1726   // Start of input = start of string + previous index + substring offset
   1727   //                                                     (0 if the string
   1728   //                                                      is not sliced).
   1729   __ Add(w10, previous_index_in_bytes, sliced_string_offset);
   1730   __ Add(x2, start, Operand(w10, UXTW));
   1731 
   1732   // Argument 4 (x3):
   1733   // End of input = start of input + (length of input - previous index)
   1734   __ Sub(w10, length, previous_index_in_bytes);
   1735   __ Add(x3, x2, Operand(w10, UXTW));
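             // E.g. (illustrative): for a non-sliced UC16 subject with
             // previous index 3, previous_index_in_bytes is 6, so argument 3
             // is start + 6 and argument 4 is start + 2 * length_in_chars.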
   1736 
   1737   // Argument 5 (x4): static offsets vector buffer.
   1738   __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
   1739 
   1740   // Argument 6 (x5): Set the number of capture registers to zero to force
   1741   // global regexps to behave as non-global. This stub is not used for global
   1742   // regexps.
   1743   __ Mov(x5, 0);
   1744 
   1745   // Argument 7 (x6): Start (high end) of backtracking stack memory area.
   1746   __ Mov(x10, address_of_regexp_stack_memory_address);
   1747   __ Ldr(x10, MemOperand(x10));
   1748   __ Mov(x11, address_of_regexp_stack_memory_size);
   1749   __ Ldr(x11, MemOperand(x11));
   1750   __ Add(x6, x10, x11);
   1751 
   1752   // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
   1753   __ Mov(x7, 1);
   1754 
   1755   // Locate the code entry and call it.
   1756   __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
   1757   DirectCEntryStub stub(isolate());
   1758   stub.GenerateCall(masm, code_object);
   1759 
   1760   __ LeaveExitFrame(false, x10, true);
   1761 
   1762   // The generated regexp code returns an int32 in w0.
   1763   Label failure, exception;
   1764   __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
   1765   __ CompareAndBranch(w0,
   1766                       NativeRegExpMacroAssembler::EXCEPTION,
   1767                       eq,
   1768                       &exception);
   1769   __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
   1770 
   1771   // Success: process the result from the native regexp code.
   1772   Register number_of_capture_registers = x12;
   1773 
   1774   // Calculate number of capture registers (number_of_captures + 1) * 2
   1775   // and store it in the last match info.
   1776   __ Ldrsw(x10,
   1777            UntagSmiFieldMemOperand(regexp_data,
   1778                                    JSRegExp::kIrregexpCaptureCountOffset));
   1779   __ Add(x10, x10, x10);
   1780   __ Add(number_of_capture_registers, x10, 2);
   1781 
    1782   // Check that the fourth argument is a JSArray object.
   1783   DCHECK(jssp.Is(__ StackPointer()));
   1784   __ Peek(x10, kLastMatchInfoOffset);
   1785   __ JumpIfSmi(x10, &runtime);
   1786   __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
   1787 
   1788   // Check that the JSArray is the fast case.
   1789   __ Ldr(last_match_info_elements,
   1790          FieldMemOperand(x10, JSArray::kElementsOffset));
   1791   __ Ldr(x10,
   1792          FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   1793   __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
   1794 
   1795   // Check that the last match info has space for the capture registers and the
   1796   // additional information (overhead).
   1797   //     (number_of_captures + 1) * 2 + overhead <= last match info size
   1798   //     (number_of_captures * 2) + 2 + overhead <= last match info size
   1799   //      number_of_capture_registers + overhead <= last match info size
   1800   __ Ldrsw(x10,
   1801            UntagSmiFieldMemOperand(last_match_info_elements,
   1802                                    FixedArray::kLengthOffset));
   1803   __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
   1804   __ Cmp(x11, x10);
   1805   __ B(gt, &runtime);
   1806 
   1807   // Store the capture count.
   1808   __ SmiTag(x10, number_of_capture_registers);
   1809   __ Str(x10,
   1810          FieldMemOperand(last_match_info_elements,
   1811                          RegExpImpl::kLastCaptureCountOffset));
   1812   // Store last subject and last input.
   1813   __ Str(subject,
   1814          FieldMemOperand(last_match_info_elements,
   1815                          RegExpImpl::kLastSubjectOffset));
   1816   // Use x10 as the subject string in order to only need
   1817   // one RecordWriteStub.
   1818   __ Mov(x10, subject);
   1819   __ RecordWriteField(last_match_info_elements,
   1820                       RegExpImpl::kLastSubjectOffset,
   1821                       x10,
   1822                       x11,
   1823                       kLRHasNotBeenSaved,
   1824                       kDontSaveFPRegs);
   1825   __ Str(subject,
   1826          FieldMemOperand(last_match_info_elements,
   1827                          RegExpImpl::kLastInputOffset));
   1828   __ Mov(x10, subject);
   1829   __ RecordWriteField(last_match_info_elements,
   1830                       RegExpImpl::kLastInputOffset,
   1831                       x10,
   1832                       x11,
   1833                       kLRHasNotBeenSaved,
   1834                       kDontSaveFPRegs);
   1835 
   1836   Register last_match_offsets = x13;
   1837   Register offsets_vector_index = x14;
   1838   Register current_offset = x15;
   1839 
   1840   // Get the static offsets vector filled by the native regexp code
   1841   // and fill the last match info.
   1842   ExternalReference address_of_static_offsets_vector =
   1843       ExternalReference::address_of_static_offsets_vector(isolate());
   1844   __ Mov(offsets_vector_index, address_of_static_offsets_vector);
   1845 
   1846   Label next_capture, done;
   1847   // Capture register counter starts from number of capture registers and
   1848   // iterates down to zero (inclusive).
   1849   __ Add(last_match_offsets,
   1850          last_match_info_elements,
   1851          RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
   1852   __ Bind(&next_capture);
   1853   __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
   1854   __ B(mi, &done);
    1855   // Read two 32-bit values from the static offsets vector buffer into
    1856   // one X register.
   1857   __ Ldr(current_offset,
   1858          MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
   1859   // Store the smi values in the last match info.
   1860   __ SmiTag(x10, current_offset);
    1861   // Clearing the bottom 32 bits gives us a Smi.
   1862   STATIC_ASSERT(kSmiTag == 0);
   1863   __ Bic(x11, current_offset, kSmiShiftMask);
   1864   __ Stp(x10,
   1865          x11,
   1866          MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
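             // On arm64, a smi stores its value in the upper 32 bits
             // (kSmiShift is 32), so the high half of current_offset isolated
             // by the Bic above is already a valid smi, while SmiTag handled
             // the low half.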
   1867   __ B(&next_capture);
   1868   __ Bind(&done);
   1869 
   1870   // Return last match info.
   1871   __ Peek(x0, kLastMatchInfoOffset);
   1872   // Drop the 4 arguments of the stub from the stack.
   1873   __ Drop(4);
   1874   __ Ret();
   1875 
   1876   __ Bind(&exception);
   1877   Register exception_value = x0;
    1878   // A stack overflow (on the backtrack stack) may have occurred
   1879   // in the RegExp code but no exception has been created yet.
   1880   // If there is no pending exception, handle that in the runtime system.
   1881   __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
   1882   __ Mov(x11,
   1883          Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1884                                    isolate())));
   1885   __ Ldr(exception_value, MemOperand(x11));
   1886   __ Cmp(x10, exception_value);
   1887   __ B(eq, &runtime);
   1888 
    1889   // Otherwise, re-throw the pending exception.
   1890   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
   1891 
   1892   __ Bind(&failure);
   1893   __ Mov(x0, Operand(isolate()->factory()->null_value()));
   1894   // Drop the 4 arguments of the stub from the stack.
   1895   __ Drop(4);
   1896   __ Ret();
   1897 
   1898   __ Bind(&runtime);
   1899   __ TailCallRuntime(Runtime::kRegExpExec);
   1900 
   1901   // Deferred code for string handling.
   1902   // (5) Long external string?  If not, go to (7).
   1903   __ Bind(&not_seq_nor_cons);
   1904   // Compare flags are still set.
   1905   __ B(ne, &not_long_external);  // Go to (7).
   1906 
   1907   // (6) External string. Make it, offset-wise, look like a sequential string.
   1908   __ Bind(&external_string);
   1909   if (masm->emit_debug_code()) {
   1910     // Assert that we do not have a cons or slice (indirect strings) here.
   1911     // Sequential strings have already been ruled out.
   1912     __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
   1913     __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   1914     __ Tst(x10, kIsIndirectStringMask);
   1915     __ Check(eq, kExternalStringExpectedButNotFound);
   1916     __ And(x10, x10, kStringRepresentationMask);
   1917     __ Cmp(x10, 0);
   1918     __ Check(ne, kExternalStringExpectedButNotFound);
   1919   }
   1920   __ Ldr(subject,
   1921          FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   1922   // Move the pointer so that offset-wise, it looks like a sequential string.
   1923   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   1924   __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   1925   __ B(&seq_string);  // Go to (4).
   1926 
   1927   // (7) If this is a short external string or not a string, bail out to
   1928   // runtime.
   1929   __ Bind(&not_long_external);
   1930   STATIC_ASSERT(kShortExternalStringTag != 0);
   1931   __ TestAndBranchIfAnySet(string_representation,
   1932                            kShortExternalStringMask | kIsNotStringMask,
   1933                            &runtime);
   1934 
   1935   // (8) Sliced string. Replace subject with parent.
   1936   __ Ldr(sliced_string_offset,
   1937          UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
   1938   __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   1939   __ B(&check_underlying);  // Go to (1).
   1940 #endif
   1941 }
   1942 
   1943 
   1944 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
   1945                                        Register argc, Register function,
   1946                                        Register feedback_vector, Register index,
   1947                                        Register new_target) {
   1948   FrameScope scope(masm, StackFrame::INTERNAL);
   1949 
   1950   // Number-of-arguments register must be smi-tagged to call out.
   1951   __ SmiTag(argc);
   1952   __ Push(argc, function, feedback_vector, index);
   1953 
   1954   DCHECK(feedback_vector.Is(x2) && index.Is(x3));
   1955   __ CallStub(stub);
   1956 
   1957   __ Pop(index, feedback_vector, function, argc);
   1958   __ SmiUntag(argc);
   1959 }
   1960 
   1961 
   1962 static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
   1963                                      Register function,
   1964                                      Register feedback_vector, Register index,
   1965                                      Register new_target, Register scratch1,
   1966                                      Register scratch2, Register scratch3) {
   1967   ASM_LOCATION("GenerateRecordCallTarget");
   1968   DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
   1969                      feedback_vector, index, new_target));
   1970   // Cache the called function in a feedback vector slot. Cache states are
   1971   // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
   1972   //  argc :            number of arguments to the construct function
   1973   //  function :        the function to call
   1974   //  feedback_vector : the feedback vector
   1975   //  index :           slot in feedback vector (smi)
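             //
             // The resulting state machine: uninitialized -> monomorphic via
             // a WeakCell (or an AllocationSite when the callee is the Array
             // function); any later mismatch -> megamorphic, recorded with
             // the megamorphic sentinel.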
   1976   Label initialize, done, miss, megamorphic, not_array_function;
   1977   Label done_initialize_count, done_increment_count;
   1978 
   1979   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
   1980             masm->isolate()->heap()->megamorphic_symbol());
   1981   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
   1982             masm->isolate()->heap()->uninitialized_symbol());
   1983 
   1984   // Load the cache state.
   1985   Register feedback = scratch1;
   1986   Register feedback_map = scratch2;
   1987   Register feedback_value = scratch3;
   1988   __ Add(feedback, feedback_vector,
   1989          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   1990   __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   1991 
   1992   // A monomorphic cache hit or an already megamorphic state: invoke the
   1993   // function without changing the state.
   1994   // We don't know if feedback value is a WeakCell or a Symbol, but it's
   1995   // harmless to read at this position in a symbol (see static asserts in
   1996   // type-feedback-vector.h).
   1997   Label check_allocation_site;
   1998   __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
   1999   __ Cmp(function, feedback_value);
   2000   __ B(eq, &done_increment_count);
   2001   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   2002   __ B(eq, &done);
   2003   __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
   2004   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
   2005   __ B(ne, &check_allocation_site);
   2006 
   2007   // If the weak cell is cleared, we have a new chance to become monomorphic.
   2008   __ JumpIfSmi(feedback_value, &initialize);
   2009   __ B(&megamorphic);
   2010 
   2011   __ bind(&check_allocation_site);
   2012   // If we came here, we need to see if we are the array function.
    2013   // If we didn't have a matching function, and we didn't find the megamorphic
   2014   // sentinel, then we have in the slot either some other function or an
   2015   // AllocationSite.
   2016   __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
   2017 
   2018   // Make sure the function is the Array() function
   2019   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
   2020   __ Cmp(function, scratch1);
   2021   __ B(ne, &megamorphic);
   2022   __ B(&done_increment_count);
   2023 
   2024   __ Bind(&miss);
   2025 
    2026   // A monomorphic miss (i.e., the cache is not uninitialized) goes
   2027   // megamorphic.
   2028   __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
   2029   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   2030   // write-barrier is needed.
   2031   __ Bind(&megamorphic);
   2032   __ Add(scratch1, feedback_vector,
   2033          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2034   __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
   2035   __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
   2036   __ B(&done);
   2037 
   2038   // An uninitialized cache is patched with the function or sentinel to
   2039   // indicate the ElementsKind if function is the Array constructor.
   2040   __ Bind(&initialize);
   2041 
   2042   // Make sure the function is the Array() function
   2043   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
   2044   __ Cmp(function, scratch1);
   2045   __ B(ne, &not_array_function);
   2046 
    2047   // The target function is the Array constructor.
    2048   // Create an AllocationSite if we don't already have one, and store it in
    2049   // the slot.
   2050   CreateAllocationSiteStub create_stub(masm->isolate());
   2051   CallStubInRecordCallTarget(masm, &create_stub, argc, function,
   2052                              feedback_vector, index, new_target);
   2053   __ B(&done_initialize_count);
   2054 
   2055   __ Bind(&not_array_function);
   2056   CreateWeakCellStub weak_cell_stub(masm->isolate());
   2057   CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
   2058                              feedback_vector, index, new_target);
   2059 
   2060   __ bind(&done_initialize_count);
   2061   // Initialize the call counter.
   2062   __ Mov(scratch1, Operand(Smi::FromInt(1)));
   2063   __ Adds(scratch2, feedback_vector,
   2064           Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2065   __ Str(scratch1,
   2066          FieldMemOperand(scratch2, FixedArray::kHeaderSize + kPointerSize));
   2067   __ b(&done);
   2068 
   2069   __ bind(&done_increment_count);
   2070 
   2071   // Increment the call count for monomorphic function calls.
   2072   __ Add(scratch1, feedback_vector,
   2073          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2074   __ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
   2075   __ Ldr(scratch2, FieldMemOperand(scratch1, 0));
   2076   __ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
   2077   __ Str(scratch2, FieldMemOperand(scratch1, 0));
   2078 
   2079   __ Bind(&done);
   2080 }
   2081 
   2082 
   2083 void CallConstructStub::Generate(MacroAssembler* masm) {
   2084   ASM_LOCATION("CallConstructStub::Generate");
   2085   // x0 : number of arguments
   2086   // x1 : the function to call
   2087   // x2 : feedback vector
   2088   // x3 : slot in feedback vector (Smi, for RecordCallTarget)
   2089   Register function = x1;
   2090 
   2091   Label non_function;
   2092   // Check that the function is not a smi.
   2093   __ JumpIfSmi(function, &non_function);
   2094   // Check that the function is a JSFunction.
   2095   Register object_type = x10;
   2096   __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
   2097                          &non_function);
   2098 
   2099   GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
   2100 
   2101   __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
   2102   Label feedback_register_initialized;
   2103   // Put the AllocationSite from the feedback vector into x2, or undefined.
   2104   __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
   2105   __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
   2106   __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
   2107                 &feedback_register_initialized);
   2108   __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
   2109   __ bind(&feedback_register_initialized);
   2110 
   2111   __ AssertUndefinedOrAllocationSite(x2, x5);
   2112 
   2113   __ Mov(x3, function);
   2114 
   2115   // Tail call to the function-specific construct stub (still in the caller
   2116   // context at this point).
   2117   __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   2118   __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
   2119   __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
   2120   __ Br(x4);
   2121 
   2122   __ Bind(&non_function);
   2123   __ Mov(x3, function);
   2124   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   2125 }
   2126 
   2127 
   2128 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   2129   // x1 - function
   2130   // x3 - slot id
   2131   // x2 - vector
   2132   // x4 - allocation site (loaded from vector[slot])
   2133   Register function = x1;
   2134   Register feedback_vector = x2;
   2135   Register index = x3;
   2136   Register allocation_site = x4;
   2137   Register scratch = x5;
   2138 
   2139   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
   2140   __ Cmp(function, scratch);
   2141   __ B(ne, miss);
   2142 
   2143   __ Mov(x0, Operand(arg_count()));
   2144 
   2145   // Increment the call count for monomorphic function calls.
   2146   __ Add(feedback_vector, feedback_vector,
   2147          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2148   __ Add(feedback_vector, feedback_vector,
   2149          Operand(FixedArray::kHeaderSize + kPointerSize));
   2150   __ Ldr(index, FieldMemOperand(feedback_vector, 0));
   2151   __ Add(index, index, Operand(Smi::FromInt(1)));
   2152   __ Str(index, FieldMemOperand(feedback_vector, 0));
   2153 
   2154   // Set up arguments for the array constructor stub.
   2155   Register allocation_site_arg = feedback_vector;
   2156   Register new_target_arg = index;
   2157   __ Mov(allocation_site_arg, allocation_site);
   2158   __ Mov(new_target_arg, function);
   2159   ArrayConstructorStub stub(masm->isolate(), arg_count());
   2160   __ TailCallStub(&stub);
   2161 }
   2162 
   2163 
   2164 void CallICStub::Generate(MacroAssembler* masm) {
   2165   ASM_LOCATION("CallICStub");
   2166 
   2167   // x1 - function
   2168   // x3 - slot id (Smi)
   2169   // x2 - vector
   2170   Label extra_checks_or_miss, call, call_function;
   2171   int argc = arg_count();
   2172   ParameterCount actual(argc);
   2173 
   2174   Register function = x1;
   2175   Register feedback_vector = x2;
   2176   Register index = x3;
   2177 
   2178   // The checks. First, does x1 match the recorded monomorphic target?
   2179   __ Add(x4, feedback_vector,
   2180          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2181   __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
   2182 
   2183   // We don't know that we have a weak cell. We might have a private symbol
   2184   // or an AllocationSite, but the memory is safe to examine.
   2185   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
   2186   // FixedArray.
   2187   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
   2188   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
   2189   // computed, meaning that it can't appear to be a pointer. If the low bit is
   2190   // 0, then hash is computed, but the 0 bit prevents the field from appearing
   2191   // to be a pointer.
   2192   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
   2193   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
   2194                     WeakCell::kValueOffset &&
   2195                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
   2196 
   2197   __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
   2198   __ Cmp(x5, function);
   2199   __ B(ne, &extra_checks_or_miss);
   2200 
   2201   // The compare above could have been a SMI/SMI comparison. Guard against this
   2202   // convincing us that we have a monomorphic JSFunction.
   2203   __ JumpIfSmi(function, &extra_checks_or_miss);
   2204 
   2205   // Increment the call count for monomorphic function calls.
   2206   __ Add(feedback_vector, feedback_vector,
   2207          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2208   __ Add(feedback_vector, feedback_vector,
   2209          Operand(FixedArray::kHeaderSize + kPointerSize));
   2210   __ Ldr(index, FieldMemOperand(feedback_vector, 0));
   2211   __ Add(index, index, Operand(Smi::FromInt(1)));
   2212   __ Str(index, FieldMemOperand(feedback_vector, 0));
   2213 
   2214   __ Bind(&call_function);
   2215   __ Mov(x0, argc);
   2216   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
   2217                                                     tail_call_mode()),
   2218           RelocInfo::CODE_TARGET);
   2219 
   2220   __ bind(&extra_checks_or_miss);
   2221   Label uninitialized, miss, not_allocation_site;
   2222 
   2223   __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
   2224 
   2225   __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
   2226   __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
   2227 
   2228   HandleArrayCase(masm, &miss);
   2229 
   2230   __ bind(&not_allocation_site);
   2231 
   2232   // The following cases attempt to handle MISS cases without going to the
   2233   // runtime.
   2234   if (FLAG_trace_ic) {
   2235     __ jmp(&miss);
   2236   }
   2237 
   2238   __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
   2239 
   2240   // We are going megamorphic. If the feedback is a JSFunction, it is fine
   2241   // to handle it here. More complex cases are dealt with in the runtime.
   2242   __ AssertNotSmi(x4);
   2243   __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
   2244   __ Add(x4, feedback_vector,
   2245          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2246   __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
   2247   __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
   2248 
   2249   __ Bind(&call);
   2250   __ Mov(x0, argc);
   2251   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
   2252           RelocInfo::CODE_TARGET);
   2253 
   2254   __ bind(&uninitialized);
   2255 
   2256   // We are going monomorphic, provided we actually have a JSFunction.
   2257   __ JumpIfSmi(function, &miss);
   2258 
   2259   // Goto miss case if we do not have a function.
   2260   __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
   2261 
   2262   // Make sure the function is not the Array() function, which requires special
   2263   // behavior on MISS.
   2264   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
   2265   __ Cmp(function, x5);
   2266   __ B(eq, &miss);
   2267 
   2268   // Make sure the function belongs to the same native context.
   2269   __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
   2270   __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
   2271   __ Ldr(x5, NativeContextMemOperand());
   2272   __ Cmp(x4, x5);
   2273   __ B(ne, &miss);
   2274 
   2275   // Initialize the call counter.
   2276   __ Mov(x5, Smi::FromInt(1));
   2277   __ Adds(x4, feedback_vector,
   2278           Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   2279   __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
   2280 
   2281   // Store the function. Use a stub since we need a frame for allocation.
   2282   // x2 - vector
   2283   // x3 - slot
   2284   // x1 - function
   2285   {
   2286     FrameScope scope(masm, StackFrame::INTERNAL);
   2287     CreateWeakCellStub create_stub(masm->isolate());
   2288     __ Push(function);
   2289     __ CallStub(&create_stub);
   2290     __ Pop(function);
   2291   }
   2292 
   2293   __ B(&call_function);
   2294 
   2295   // We are here because tracing is on or we encountered a MISS case we can't
   2296   // handle here.
   2297   __ bind(&miss);
   2298   GenerateMiss(masm);
   2299 
   2300   __ B(&call);
   2301 }
   2302 
   2303 
   2304 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   2305   ASM_LOCATION("CallICStub[Miss]");
   2306 
   2307   FrameScope scope(masm, StackFrame::INTERNAL);
   2308 
   2309   // Push the receiver and the function and feedback info.
   2310   __ Push(x1, x2, x3);
   2311 
   2312   // Call the entry.
   2313   __ CallRuntime(Runtime::kCallIC_Miss);
   2314 
    2315   // Move the result to x1 and exit the internal frame.
   2316   __ Mov(x1, x0);
   2317 }
   2318 
   2319 
   2320 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   2321   // If the receiver is a smi trigger the non-string case.
   2322   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
   2323     __ JumpIfSmi(object_, receiver_not_string_);
   2324 
   2325     // Fetch the instance type of the receiver into result register.
   2326     __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2327     __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2328 
   2329     // If the receiver is not a string trigger the non-string case.
   2330     __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
   2331   }
   2332 
   2333   // If the index is non-smi trigger the non-smi case.
   2334   __ JumpIfNotSmi(index_, &index_not_smi_);
   2335 
   2336   __ Bind(&got_smi_index_);
   2337   // Check for index out of range.
   2338   __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
   2339   __ Cmp(result_, Operand::UntagSmi(index_));
   2340   __ B(ls, index_out_of_range_);
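             // The unsigned 'ls' also rejects a negative index: untagged, it
             // compares as a huge unsigned value, so length <= index holds
             // and we branch out of range.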
   2341 
   2342   __ SmiUntag(index_);
   2343 
   2344   StringCharLoadGenerator::Generate(masm,
   2345                                     object_,
   2346                                     index_.W(),
   2347                                     result_,
   2348                                     &call_runtime_);
   2349   __ SmiTag(result_);
   2350   __ Bind(&exit_);
   2351 }
   2352 
   2353 
   2354 void StringCharCodeAtGenerator::GenerateSlow(
   2355     MacroAssembler* masm, EmbedMode embed_mode,
   2356     const RuntimeCallHelper& call_helper) {
   2357   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   2358 
   2359   __ Bind(&index_not_smi_);
   2360   // If index is a heap number, try converting it to an integer.
   2361   __ JumpIfNotHeapNumber(index_, index_not_number_);
   2362   call_helper.BeforeCall(masm);
   2363   if (embed_mode == PART_OF_IC_HANDLER) {
   2364     __ Push(LoadWithVectorDescriptor::VectorRegister(),
   2365             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   2366   } else {
   2367     // Save object_ on the stack and pass index_ as argument for runtime call.
   2368     __ Push(object_, index_);
   2369   }
   2370   __ CallRuntime(Runtime::kNumberToSmi);
   2371   // Save the conversion result before the pop instructions below
   2372   // have a chance to overwrite it.
   2373   __ Mov(index_, x0);
   2374   if (embed_mode == PART_OF_IC_HANDLER) {
   2375     __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
   2376            LoadWithVectorDescriptor::VectorRegister());
   2377   } else {
   2378     __ Pop(object_);
   2379   }
   2380   // Reload the instance type.
   2381   __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2382   __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2383   call_helper.AfterCall(masm);
   2384 
   2385   // If index is still not a smi, it must be out of range.
   2386   __ JumpIfNotSmi(index_, index_out_of_range_);
   2387   // Otherwise, return to the fast path.
   2388   __ B(&got_smi_index_);
   2389 
   2390   // Call runtime. We get here when the receiver is a string and the
    2391   // index is a number, but the code for getting the actual character
   2392   // is too complex (e.g., when the string needs to be flattened).
   2393   __ Bind(&call_runtime_);
   2394   call_helper.BeforeCall(masm);
   2395   __ SmiTag(index_);
   2396   __ Push(object_, index_);
   2397   __ CallRuntime(Runtime::kStringCharCodeAtRT);
   2398   __ Mov(result_, x0);
   2399   call_helper.AfterCall(masm);
   2400   __ B(&exit_);
   2401 
   2402   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   2403 }
   2404 
   2405 
   2406 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   2407   __ JumpIfNotSmi(code_, &slow_case_);
   2408   __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
   2409   __ B(hi, &slow_case_);
   2410 
   2411   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
    2412   // At this point the code register contains the smi-tagged one-byte char code.
   2413   __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
   2414   __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
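             // Roughly: result_ = single_character_string_cache[char_code]
             // (pseudo-code); an undefined entry means the one-character
             // string has not been cached, and the check below falls back to
             // the slow case.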
   2415   __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
   2416   __ Bind(&exit_);
   2417 }
   2418 
   2419 
   2420 void StringCharFromCodeGenerator::GenerateSlow(
   2421     MacroAssembler* masm,
   2422     const RuntimeCallHelper& call_helper) {
   2423   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
   2424 
   2425   __ Bind(&slow_case_);
   2426   call_helper.BeforeCall(masm);
   2427   __ Push(code_);
   2428   __ CallRuntime(Runtime::kStringCharFromCode);
   2429   __ Mov(result_, x0);
   2430   call_helper.AfterCall(masm);
   2431   __ B(&exit_);
   2432 
   2433   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
   2434 }
   2435 
   2436 
   2437 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   2438   // Inputs are in x0 (lhs) and x1 (rhs).
   2439   DCHECK_EQ(CompareICState::BOOLEAN, state());
   2440   ASM_LOCATION("CompareICStub[Booleans]");
   2441   Label miss;
   2442 
   2443   __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2444   __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2445   if (!Token::IsEqualityOp(op())) {
   2446     __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
   2447     __ AssertSmi(x1);
   2448     __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
   2449     __ AssertSmi(x0);
   2450   }
   2451   __ Sub(x0, x1, x0);
   2452   __ Ret();
   2453 
   2454   __ Bind(&miss);
   2455   GenerateMiss(masm);
   2456 }
   2457 
   2458 
   2459 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   2460   // Inputs are in x0 (lhs) and x1 (rhs).
   2461   DCHECK(state() == CompareICState::SMI);
   2462   ASM_LOCATION("CompareICStub[Smis]");
   2463   Label miss;
   2464   // Bail out (to 'miss') unless both x0 and x1 are smis.
   2465   __ JumpIfEitherNotSmi(x0, x1, &miss);
   2466 
   2467   if (GetCondition() == eq) {
   2468     // For equality we do not care about the sign of the result.
   2469     __ Sub(x0, x0, x1);
   2470   } else {
   2471     // Untag before subtracting to avoid handling overflow.
   2472     __ SmiUntag(x1);
   2473     __ Sub(x0, x1, Operand::UntagSmi(x0));
   2474   }
   2475   __ Ret();
   2476 
   2477   __ Bind(&miss);
   2478   GenerateMiss(masm);
   2479 }
   2480 
   2481 
   2482 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   2483   DCHECK(state() == CompareICState::NUMBER);
   2484   ASM_LOCATION("CompareICStub[HeapNumbers]");
   2485 
   2486   Label unordered, maybe_undefined1, maybe_undefined2;
   2487   Label miss, handle_lhs, values_in_d_regs;
   2488   Label untag_rhs, untag_lhs;
   2489 
   2490   Register result = x0;
   2491   Register rhs = x0;
   2492   Register lhs = x1;
   2493   FPRegister rhs_d = d0;
   2494   FPRegister lhs_d = d1;
   2495 
   2496   if (left() == CompareICState::SMI) {
   2497     __ JumpIfNotSmi(lhs, &miss);
   2498   }
   2499   if (right() == CompareICState::SMI) {
   2500     __ JumpIfNotSmi(rhs, &miss);
   2501   }
   2502 
   2503   __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
   2504   __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
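           // Speculatively untag both inputs to doubles; each result is only
           // used on paths where the corresponding operand really is a smi.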
   2505 
   2506   // Load rhs if it's a heap number.
   2507   __ JumpIfSmi(rhs, &handle_lhs);
   2508   __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
   2509   __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
   2510 
   2511   // Load lhs if it's a heap number.
   2512   __ Bind(&handle_lhs);
   2513   __ JumpIfSmi(lhs, &values_in_d_regs);
   2514   __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
   2515   __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
   2516 
   2517   __ Bind(&values_in_d_regs);
   2518   __ Fcmp(lhs_d, rhs_d);
   2519   __ B(vs, &unordered);  // Overflow flag set if either is NaN.
   2520   STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
   2521   __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
   2522   __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
   2523   __ Ret();
   2524 
   2525   __ Bind(&unordered);
   2526   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
   2527                      CompareICState::GENERIC, CompareICState::GENERIC);
   2528   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   2529 
   2530   __ Bind(&maybe_undefined1);
   2531   if (Token::IsOrderedRelationalCompareOp(op())) {
   2532     __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
   2533     __ JumpIfSmi(lhs, &unordered);
   2534     __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
   2535     __ B(&unordered);
   2536   }
   2537 
   2538   __ Bind(&maybe_undefined2);
   2539   if (Token::IsOrderedRelationalCompareOp(op())) {
   2540     __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
   2541   }
   2542 
   2543   __ Bind(&miss);
   2544   GenerateMiss(masm);
   2545 }
   2546 
   2547 
   2548 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   2549   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   2550   ASM_LOCATION("CompareICStub[InternalizedStrings]");
   2551   Label miss;
   2552 
   2553   Register result = x0;
   2554   Register rhs = x0;
   2555   Register lhs = x1;
   2556 
   2557   // Check that both operands are heap objects.
   2558   __ JumpIfEitherSmi(lhs, rhs, &miss);
   2559 
   2560   // Check that both operands are internalized strings.
   2561   Register rhs_map = x10;
   2562   Register lhs_map = x11;
   2563   Register rhs_type = x10;
   2564   Register lhs_type = x11;
   2565   __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
   2566   __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
   2567   __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
   2568   __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
   2569 
   2570   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
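           // Since both tags are zero, a single Orr and test checks both
           // operands at once: a set not-string or not-internalized bit in
           // either instance type means a miss.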
   2571   __ Orr(x12, lhs_type, rhs_type);
   2572   __ TestAndBranchIfAnySet(
   2573       x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
   2574 
   2575   // Internalized strings are compared by identity.
   2576   STATIC_ASSERT(EQUAL == 0);
   2577   __ Cmp(lhs, rhs);
   2578   __ Cset(result, ne);
   2579   __ Ret();
   2580 
   2581   __ Bind(&miss);
   2582   GenerateMiss(masm);
   2583 }
   2584 
   2585 
   2586 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   2587   DCHECK(state() == CompareICState::UNIQUE_NAME);
   2588   ASM_LOCATION("CompareICStub[UniqueNames]");
   2589   DCHECK(GetCondition() == eq);
   2590   Label miss;
   2591 
   2592   Register result = x0;
   2593   Register rhs = x0;
   2594   Register lhs = x1;
   2595 
   2596   Register lhs_instance_type = w2;
   2597   Register rhs_instance_type = w3;
   2598 
   2599   // Check that both operands are heap objects.
   2600   __ JumpIfEitherSmi(lhs, rhs, &miss);
   2601 
    2602   // Check that both operands are unique names. This leaves the instance
    2603   // types loaded in lhs_instance_type and rhs_instance_type.
   2604   __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
   2605   __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
   2606   __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   2607   __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
   2608 
   2609   // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
   2610   // should have kInternalizedTag set.
   2611   __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
   2612   __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
   2613 
   2614   // Unique names are compared by identity.
   2615   STATIC_ASSERT(EQUAL == 0);
   2616   __ Cmp(lhs, rhs);
   2617   __ Cset(result, ne);
   2618   __ Ret();
   2619 
   2620   __ Bind(&miss);
   2621   GenerateMiss(masm);
   2622 }
   2623 
   2624 
   2625 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
   2626   DCHECK(state() == CompareICState::STRING);
   2627   ASM_LOCATION("CompareICStub[Strings]");
   2628 
   2629   Label miss;
   2630 
   2631   bool equality = Token::IsEqualityOp(op());
   2632 
   2633   Register result = x0;
   2634   Register rhs = x0;
   2635   Register lhs = x1;
   2636 
   2637   // Check that both operands are heap objects.
   2638   __ JumpIfEitherSmi(rhs, lhs, &miss);
   2639 
   2640   // Check that both operands are strings.
   2641   Register rhs_map = x10;
   2642   Register lhs_map = x11;
   2643   Register rhs_type = x10;
   2644   Register lhs_type = x11;
   2645   __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
   2646   __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
   2647   __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
   2648   __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
   2649   STATIC_ASSERT(kNotStringTag != 0);
   2650   __ Orr(x12, lhs_type, rhs_type);
   2651   __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
   2652 
   2653   // Fast check for identical strings.
   2654   Label not_equal;
   2655   __ Cmp(lhs, rhs);
   2656   __ B(ne, &not_equal);
   2657   __ Mov(result, EQUAL);
   2658   __ Ret();
   2659 
   2660   __ Bind(&not_equal);
    2661   // Handle non-identical strings.
   2662 
   2663   // Check that both strings are internalized strings. If they are, we're done
   2664   // because we already know they are not identical. We know they are both
   2665   // strings.
   2666   if (equality) {
   2667     DCHECK(GetCondition() == eq);
   2668     STATIC_ASSERT(kInternalizedTag == 0);
   2669     Label not_internalized_strings;
   2670     __ Orr(x12, lhs_type, rhs_type);
   2671     __ TestAndBranchIfAnySet(
   2672         x12, kIsNotInternalizedMask, &not_internalized_strings);
    2673     // The result is rhs (x0) itself: a heap pointer, hence non-zero, not EQUAL.
   2674     __ Ret();
   2675     __ Bind(&not_internalized_strings);
   2676   }
   2677 
   2678   // Check that both strings are sequential one-byte.
   2679   Label runtime;
   2680   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
   2681                                                     x13, &runtime);
   2682 
   2683   // Compare flat one-byte strings. Returns when done.
   2684   if (equality) {
   2685     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
   2686                                                   x12);
   2687   } else {
   2688     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
   2689                                                     x12, x13);
   2690   }
   2691 
   2692   // Handle more complex cases in runtime.
   2693   __ Bind(&runtime);
   2694   if (equality) {
   2695     {
   2696       FrameScope scope(masm, StackFrame::INTERNAL);
   2697       __ Push(lhs, rhs);
   2698       __ CallRuntime(Runtime::kStringEqual);
   2699     }
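             // kStringEqual returns the true/false oddball in x0. Subtracting
             // the true root yields 0 iff the strings were equal, matching the
             // comparison result convention.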
   2700     __ LoadRoot(x1, Heap::kTrueValueRootIndex);
   2701     __ Sub(x0, x0, x1);
   2702     __ Ret();
   2703   } else {
   2704     __ Push(lhs, rhs);
   2705     __ TailCallRuntime(Runtime::kStringCompare);
   2706   }
   2707 
   2708   __ Bind(&miss);
   2709   GenerateMiss(masm);
   2710 }
   2711 
   2712 
   2713 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
   2714   DCHECK_EQ(CompareICState::RECEIVER, state());
   2715   ASM_LOCATION("CompareICStub[Receivers]");
   2716 
   2717   Label miss;
   2718 
   2719   Register result = x0;
   2720   Register rhs = x0;
   2721   Register lhs = x1;
   2722 
   2723   __ JumpIfEitherSmi(rhs, lhs, &miss);
   2724 
   2725   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   2726   __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
   2727   __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
   2728 
   2729   DCHECK_EQ(eq, GetCondition());
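           // Receivers compare equal only when identical, so the pointer
           // difference is the result: zero iff rhs and lhs are the same object.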
   2730   __ Sub(result, rhs, lhs);
   2731   __ Ret();
   2732 
   2733   __ Bind(&miss);
   2734   GenerateMiss(masm);
   2735 }
   2736 
   2737 
   2738 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   2739   ASM_LOCATION("CompareICStub[KnownReceivers]");
   2740 
   2741   Label miss;
   2742   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   2743 
   2744   Register result = x0;
   2745   Register rhs = x0;
   2746   Register lhs = x1;
   2747 
   2748   __ JumpIfEitherSmi(rhs, lhs, &miss);
   2749 
   2750   Register rhs_map = x10;
   2751   Register lhs_map = x11;
   2752   Register map = x12;
   2753   __ GetWeakValue(map, cell);
   2754   __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
   2755   __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
   2756   __ Cmp(rhs_map, map);
   2757   __ B(ne, &miss);
   2758   __ Cmp(lhs_map, map);
   2759   __ B(ne, &miss);
   2760 
   2761   if (Token::IsEqualityOp(op())) {
    2762     __ Sub(result, rhs, lhs);
    2763     __ Ret();
   2764   } else {
   2765     Register ncr = x2;
   2766     if (op() == Token::LT || op() == Token::LTE) {
   2767       __ Mov(ncr, Smi::FromInt(GREATER));
   2768     } else {
   2769       __ Mov(ncr, Smi::FromInt(LESS));
   2770     }
   2771     __ Push(lhs, rhs, ncr);
   2772     __ TailCallRuntime(Runtime::kCompare);
   2773   }
   2774 
   2775   __ Bind(&miss);
   2776   GenerateMiss(masm);
   2777 }
   2778 
   2779 
   2780 // This method handles the case where a compare stub had the wrong
   2781 // implementation. It calls a miss handler, which re-writes the stub. All other
   2782 // CompareICStub::Generate* methods should fall back into this one if their
   2783 // operands were not the expected types.
   2784 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   2785   ASM_LOCATION("CompareICStub[Miss]");
   2786 
   2787   Register stub_entry = x11;
   2788   {
   2789     FrameScope scope(masm, StackFrame::INTERNAL);
   2790     Register op = x10;
   2791     Register left = x1;
   2792     Register right = x0;
   2793     // Preserve some caller-saved registers.
   2794     __ Push(x1, x0, lr);
   2795     // Push the arguments.
   2796     __ Mov(op, Smi::FromInt(this->op()));
   2797     __ Push(left, right, op);
   2798 
   2799     // Call the miss handler. This also pops the arguments.
   2800     __ CallRuntime(Runtime::kCompareIC_Miss);
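             // x0 now holds the code object of the rewritten stub.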
   2801 
   2802     // Compute the entry point of the rewritten stub.
   2803     __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
   2804     // Restore caller-saved registers.
   2805     __ Pop(lr, x0, x1);
   2806   }
   2807 
   2808   // Tail-call to the new stub.
   2809   __ Jump(stub_entry);
   2810 }
   2811 
   2812 
   2813 void SubStringStub::Generate(MacroAssembler* masm) {
   2814   ASM_LOCATION("SubStringStub::Generate");
   2815   Label runtime;
   2816 
   2817   // Stack frame on entry.
   2818   //  lr: return address
   2819   //  jssp[0]:  substring "to" offset
   2820   //  jssp[8]:  substring "from" offset
   2821   //  jssp[16]: pointer to string object
   2822 
   2823   // This stub is called from the native-call %_SubString(...), so
   2824   // nothing can be assumed about the arguments. It is tested that:
   2825   //  "string" is a sequential string,
   2826   //  both "from" and "to" are smis, and
    2827   //  0 <= from <= to <= string.length (in debug mode).
   2828   // If any of these assumptions fail, we call the runtime system.
   2829 
   2830   static const int kToOffset = 0 * kPointerSize;
   2831   static const int kFromOffset = 1 * kPointerSize;
   2832   static const int kStringOffset = 2 * kPointerSize;
   2833 
   2834   Register to = x0;
   2835   Register from = x15;
   2836   Register input_string = x10;
   2837   Register input_length = x11;
   2838   Register input_type = x12;
   2839   Register result_string = x0;
   2840   Register result_length = x1;
   2841   Register temp = x3;
   2842 
   2843   __ Peek(to, kToOffset);
   2844   __ Peek(from, kFromOffset);
   2845 
   2846   // Check that both from and to are smis. If not, jump to runtime.
   2847   __ JumpIfEitherNotSmi(from, to, &runtime);
   2848   __ SmiUntag(from);
   2849   __ SmiUntag(to);
   2850 
   2851   // Calculate difference between from and to. If to < from, branch to runtime.
   2852   __ Subs(result_length, to, from);
   2853   __ B(mi, &runtime);
   2854 
    2855   // Check that from is not negative.
   2856   __ Tbnz(from, kWSignBit, &runtime);
   2857 
   2858   // Make sure first argument is a string.
   2859   __ Peek(input_string, kStringOffset);
   2860   __ JumpIfSmi(input_string, &runtime);
   2861   __ IsObjectJSStringType(input_string, input_type, &runtime);
   2862 
   2863   Label single_char;
   2864   __ Cmp(result_length, 1);
   2865   __ B(eq, &single_char);
   2866 
   2867   // Short-cut for the case of trivial substring.
   2868   Label return_x0;
   2869   __ Ldrsw(input_length,
   2870            UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
   2871 
   2872   __ Cmp(result_length, input_length);
   2873   __ CmovX(x0, input_string, eq);
   2874   // Return original string.
   2875   __ B(eq, &return_x0);
   2876 
   2877   // Longer than original string's length or negative: unsafe arguments.
   2878   __ B(hi, &runtime);
   2879 
   2880   // Shorter than original string's length: an actual substring.
   2881 
   2882   //   x0   to               substring end character offset
   2883   //   x1   result_length    length of substring result
   2884   //   x10  input_string     pointer to input string object
   2885   //   x10  unpacked_string  pointer to unpacked string object
   2886   //   x11  input_length     length of input string
   2887   //   x12  input_type       instance type of input string
   2888   //   x15  from             substring start character offset
   2889 
   2890   // Deal with different string types: update the index if necessary and put
   2891   // the underlying string into register unpacked_string.
   2892   Label underlying_unpacked, sliced_string, seq_or_external_string;
   2893   Label update_instance_type;
   2894   // If the string is not indirect, it can only be sequential or external.
   2895   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
   2896   STATIC_ASSERT(kIsIndirectStringMask != 0);
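           // kIsIndirectStringMask separates indirect (cons and sliced) strings
           // from direct (sequential and external) ones; kSlicedNotConsMask
           // then distinguishes sliced from cons.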
   2897 
   2898   // Test for string types, and branch/fall through to appropriate unpacking
   2899   // code.
   2900   __ Tst(input_type, kIsIndirectStringMask);
   2901   __ B(eq, &seq_or_external_string);
   2902   __ Tst(input_type, kSlicedNotConsMask);
   2903   __ B(ne, &sliced_string);
   2904 
   2905   Register unpacked_string = input_string;
   2906 
   2907   // Cons string. Check whether it is flat, then fetch first part.
   2908   __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
   2909   __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
   2910   __ Ldr(unpacked_string,
   2911          FieldMemOperand(input_string, ConsString::kFirstOffset));
   2912   __ B(&update_instance_type);
   2913 
   2914   __ Bind(&sliced_string);
   2915   // Sliced string. Fetch parent and correct start index by offset.
   2916   __ Ldrsw(temp,
   2917            UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
   2918   __ Add(from, from, temp);
   2919   __ Ldr(unpacked_string,
   2920          FieldMemOperand(input_string, SlicedString::kParentOffset));
   2921 
   2922   __ Bind(&update_instance_type);
   2923   __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
   2924   __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    2925   // Now control must go to &underlying_unpacked. Since no code is generated
    2926   // before it, we fall through instead of emitting a useless branch.
   2927 
   2928   __ Bind(&seq_or_external_string);
   2929   // Sequential or external string. Registers unpacked_string and input_string
   2930   // alias, so there's nothing to do here.
   2931   // Note that if code is added here, the above code must be updated.
   2932 
   2933   //   x0   result_string    pointer to result string object (uninit)
   2934   //   x1   result_length    length of substring result
   2935   //   x10  unpacked_string  pointer to unpacked string object
   2936   //   x11  input_length     length of input string
   2937   //   x12  input_type       instance type of input string
   2938   //   x15  from             substring start character offset
   2939   __ Bind(&underlying_unpacked);
   2940 
   2941   if (FLAG_string_slices) {
   2942     Label copy_routine;
   2943     __ Cmp(result_length, SlicedString::kMinLength);
   2944     // Short slice. Copy instead of slicing.
   2945     __ B(lt, &copy_routine);
   2946     // Allocate new sliced string. At this point we do not reload the instance
   2947     // type including the string encoding because we simply rely on the info
   2948     // provided by the original string. It does not matter if the original
   2949     // string's encoding is wrong because we always have to recheck encoding of
   2950     // the newly created string's parent anyway due to externalized strings.
   2951     Label two_byte_slice, set_slice_header;
   2952     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   2953     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
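             // The encoding bit is set for one-byte strings (asserted above),
             // so a clear bit selects the two-byte path.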
   2954     __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
   2955     __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
   2956                                    &runtime);
   2957     __ B(&set_slice_header);
   2958 
   2959     __ Bind(&two_byte_slice);
   2960     __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
   2961                                    &runtime);
   2962 
   2963     __ Bind(&set_slice_header);
   2964     __ SmiTag(from);
   2965     __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
   2966     __ Str(unpacked_string,
   2967            FieldMemOperand(result_string, SlicedString::kParentOffset));
   2968     __ B(&return_x0);
   2969 
   2970     __ Bind(&copy_routine);
   2971   }
   2972 
   2973   //   x0   result_string    pointer to result string object (uninit)
   2974   //   x1   result_length    length of substring result
   2975   //   x10  unpacked_string  pointer to unpacked string object
   2976   //   x11  input_length     length of input string
   2977   //   x12  input_type       instance type of input string
   2978   //   x13  unpacked_char0   pointer to first char of unpacked string (uninit)
   2979   //   x13  substring_char0  pointer to first char of substring (uninit)
   2980   //   x14  result_char0     pointer to first char of result (uninit)
   2981   //   x15  from             substring start character offset
   2982   Register unpacked_char0 = x13;
   2983   Register substring_char0 = x13;
   2984   Register result_char0 = x14;
   2985   Label two_byte_sequential, sequential_string, allocate_result;
   2986   STATIC_ASSERT(kExternalStringTag != 0);
   2987   STATIC_ASSERT(kSeqStringTag == 0);
   2988 
   2989   __ Tst(input_type, kExternalStringTag);
   2990   __ B(eq, &sequential_string);
   2991 
   2992   __ Tst(input_type, kShortExternalStringTag);
   2993   __ B(ne, &runtime);
   2994   __ Ldr(unpacked_char0,
   2995          FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
   2996   // unpacked_char0 points to the first character of the underlying string.
   2997   __ B(&allocate_result);
   2998 
   2999   __ Bind(&sequential_string);
   3000   // Locate first character of underlying subject string.
   3001   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   3002   __ Add(unpacked_char0, unpacked_string,
   3003          SeqOneByteString::kHeaderSize - kHeapObjectTag);
   3004 
   3005   __ Bind(&allocate_result);
    3006   // Allocate the result. Branch out first if the string is two-byte.
   3007   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   3008   __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
   3009 
   3010   // Allocate and copy the resulting one-byte string.
   3011   __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
   3012 
   3013   // Locate first character of substring to copy.
   3014   __ Add(substring_char0, unpacked_char0, from);
   3015 
   3016   // Locate first character of result.
   3017   __ Add(result_char0, result_string,
   3018          SeqOneByteString::kHeaderSize - kHeapObjectTag);
   3019 
   3020   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   3021   __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
   3022   __ B(&return_x0);
   3023 
   3024   // Allocate and copy the resulting two-byte string.
   3025   __ Bind(&two_byte_sequential);
   3026   __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
   3027 
   3028   // Locate first character of substring to copy.
   3029   __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
   3030 
   3031   // Locate first character of result.
   3032   __ Add(result_char0, result_string,
   3033          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   3034 
   3035   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
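           // Two-byte strings use two bytes per character, so double the length
           // to get the byte count for CopyBytes.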
   3036   __ Add(result_length, result_length, result_length);
   3037   __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
   3038 
   3039   __ Bind(&return_x0);
   3040   Counters* counters = isolate()->counters();
   3041   __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
   3042   __ Drop(3);
   3043   __ Ret();
   3044 
   3045   __ Bind(&runtime);
   3046   __ TailCallRuntime(Runtime::kSubString);
   3047 
    3048   __ Bind(&single_char);
   3049   // x1: result_length
   3050   // x10: input_string
   3051   // x12: input_type
   3052   // x15: from (untagged)
   3053   __ SmiTag(from);
   3054   StringCharAtGenerator generator(input_string, from, result_length, x0,
   3055                                   &runtime, &runtime, &runtime,
   3056                                   RECEIVER_IS_STRING);
   3057   generator.GenerateFast(masm);
   3058   __ Drop(3);
   3059   __ Ret();
   3060   generator.SkipSlow(masm, &runtime);
   3061 }
   3062 
   3063 void ToStringStub::Generate(MacroAssembler* masm) {
   3064   // The ToString stub takes one argument in x0.
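           // Dispatch order: strings are returned unchanged, smis and heap
           // numbers go to NumberToString, oddballs return their cached string,
           // and everything else falls back to the runtime.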
   3065   Label is_number;
   3066   __ JumpIfSmi(x0, &is_number);
   3067 
   3068   Label not_string;
   3069   __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, &not_string, hs);
   3070   // x0: receiver
   3071   // x1: receiver instance type
   3072   __ Ret();
   3073   __ Bind(&not_string);
   3074 
   3075   Label not_heap_number;
   3076   __ Cmp(x1, HEAP_NUMBER_TYPE);
   3077   __ B(ne, &not_heap_number);
   3078   __ Bind(&is_number);
   3079   NumberToStringStub stub(isolate());
   3080   __ TailCallStub(&stub);
   3081   __ Bind(&not_heap_number);
   3082 
   3083   Label not_oddball;
   3084   __ Cmp(x1, ODDBALL_TYPE);
   3085   __ B(ne, &not_oddball);
   3086   __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
   3087   __ Ret();
   3088   __ Bind(&not_oddball);
   3089 
   3090   __ Push(x0);  // Push argument.
   3091   __ TailCallRuntime(Runtime::kToString);
   3092 }
   3093 
   3094 
   3095 void ToNameStub::Generate(MacroAssembler* masm) {
   3096   // The ToName stub takes one argument in x0.
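           // Names (strings and symbols) are returned unchanged; other inputs
           // follow the same dispatch as ToString, but fall back to
           // Runtime::kToName.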
   3097   Label is_number;
   3098   __ JumpIfSmi(x0, &is_number);
   3099 
   3100   Label not_name;
   3101   STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
   3102   __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
   3103   // x0: receiver
   3104   // x1: receiver instance type
   3105   __ Ret();
   3106   __ Bind(&not_name);
   3107 
   3108   Label not_heap_number;
   3109   __ Cmp(x1, HEAP_NUMBER_TYPE);
   3110   __ B(ne, &not_heap_number);
   3111   __ Bind(&is_number);
   3112   NumberToStringStub stub(isolate());
   3113   __ TailCallStub(&stub);
   3114   __ Bind(&not_heap_number);
   3115 
   3116   Label not_oddball;
   3117   __ Cmp(x1, ODDBALL_TYPE);
   3118   __ B(ne, &not_oddball);
   3119   __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
   3120   __ Ret();
   3121   __ Bind(&not_oddball);
   3122 
   3123   __ Push(x0);  // Push argument.
   3124   __ TailCallRuntime(Runtime::kToName);
   3125 }
   3126 
   3127 
   3128 void StringHelper::GenerateFlatOneByteStringEquals(
   3129     MacroAssembler* masm, Register left, Register right, Register scratch1,
   3130     Register scratch2, Register scratch3) {
   3131   DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
   3132   Register result = x0;
   3133   Register left_length = scratch1;
   3134   Register right_length = scratch2;
   3135 
   3136   // Compare lengths. If lengths differ, strings can't be equal. Lengths are
   3137   // smis, and don't need to be untagged.
   3138   Label strings_not_equal, check_zero_length;
   3139   __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
   3140   __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
   3141   __ Cmp(left_length, right_length);
   3142   __ B(eq, &check_zero_length);
   3143 
   3144   __ Bind(&strings_not_equal);
   3145   __ Mov(result, Smi::FromInt(NOT_EQUAL));
   3146   __ Ret();
   3147 
    3148   // Check if the length is zero. If so, the strings must be equal (and empty).
   3149   Label compare_chars;
   3150   __ Bind(&check_zero_length);
   3151   STATIC_ASSERT(kSmiTag == 0);
   3152   __ Cbnz(left_length, &compare_chars);
   3153   __ Mov(result, Smi::FromInt(EQUAL));
   3154   __ Ret();
   3155 
   3156   // Compare characters. Falls through if all characters are equal.
   3157   __ Bind(&compare_chars);
   3158   GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
   3159                                   scratch3, &strings_not_equal);
   3160 
   3161   // Characters in strings are equal.
   3162   __ Mov(result, Smi::FromInt(EQUAL));
   3163   __ Ret();
   3164 }
   3165 
   3166 
   3167 void StringHelper::GenerateCompareFlatOneByteStrings(
   3168     MacroAssembler* masm, Register left, Register right, Register scratch1,
   3169     Register scratch2, Register scratch3, Register scratch4) {
   3170   DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
   3171   Label result_not_equal, compare_lengths;
   3172 
   3173   // Find minimum length and length difference.
   3174   Register length_delta = scratch3;
   3175   __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
   3176   __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
   3177   __ Subs(length_delta, scratch1, scratch2);
   3178 
   3179   Register min_length = scratch1;
   3180   __ Csel(min_length, scratch2, scratch1, gt);
   3181   __ Cbz(min_length, &compare_lengths);
   3182 
   3183   // Compare loop.
   3184   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
   3185                                   scratch4, &result_not_equal);
   3186 
   3187   // Compare lengths - strings up to min-length are equal.
   3188   __ Bind(&compare_lengths);
   3189 
   3190   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   3191 
   3192   // Use length_delta as result if it's zero.
   3193   Register result = x0;
   3194   __ Subs(result, length_delta, 0);
   3195 
   3196   __ Bind(&result_not_equal);
   3197   Register greater = x10;
   3198   Register less = x11;
   3199   __ Mov(greater, Smi::FromInt(GREATER));
   3200   __ Mov(less, Smi::FromInt(LESS));
   3201   __ CmovX(result, greater, gt);
   3202   __ CmovX(result, less, lt);
   3203   __ Ret();
   3204 }
   3205 
   3206 
   3207 void StringHelper::GenerateOneByteCharsCompareLoop(
   3208     MacroAssembler* masm, Register left, Register right, Register length,
   3209     Register scratch1, Register scratch2, Label* chars_not_equal) {
   3210   DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
   3211 
    3212   // Change the index to run from -length to -1 by adding length to the
    3213   // string start. This means the loop ends when the index reaches zero,
    3214   // which doesn't need an additional compare.
   3215   __ SmiUntag(length);
   3216   __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
   3217   __ Add(left, left, scratch1);
   3218   __ Add(right, right, scratch1);
   3219 
   3220   Register index = length;
   3221   __ Neg(index, length);  // index = -length;
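           // For example, with length 3 the index runs -3, -2, -1, addressing
           // the three characters relative to the one-past-the-end pointers
           // computed above.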
   3222 
   3223   // Compare loop
   3224   Label loop;
   3225   __ Bind(&loop);
   3226   __ Ldrb(scratch1, MemOperand(left, index));
   3227   __ Ldrb(scratch2, MemOperand(right, index));
   3228   __ Cmp(scratch1, scratch2);
   3229   __ B(ne, chars_not_equal);
   3230   __ Add(index, index, 1);
   3231   __ Cbnz(index, &loop);
   3232 }
   3233 
   3234 
   3235 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   3236   // ----------- S t a t e -------------
   3237   //  -- x1    : left
   3238   //  -- x0    : right
   3239   //  -- lr    : return address
   3240   // -----------------------------------
   3241 
   3242   // Load x2 with the allocation site.  We stick an undefined dummy value here
   3243   // and replace it with the real allocation site later when we instantiate this
   3244   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   3245   __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
   3246 
   3247   // Make sure that we actually patched the allocation site.
   3248   if (FLAG_debug_code) {
   3249     __ AssertNotSmi(x2, kExpectedAllocationSite);
   3250     __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
   3251     __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
   3252                             kExpectedAllocationSite);
   3253   }
   3254 
   3255   // Tail call into the stub that handles binary operations with allocation
   3256   // sites.
   3257   BinaryOpWithAllocationSiteStub stub(isolate(), state());
   3258   __ TailCallStub(&stub);
   3259 }
   3260 
   3261 
   3262 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   3263   // We need some extra registers for this stub, they have been allocated
   3264   // but we need to save them before using them.
   3265   regs_.Save(masm);
   3266 
   3267   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   3268     Label dont_need_remembered_set;
   3269 
   3270     Register val = regs_.scratch0();
   3271     __ Ldr(val, MemOperand(regs_.address()));
   3272     __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
   3273 
   3274     __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
   3275 
   3276     // First notify the incremental marker if necessary, then update the
   3277     // remembered set.
   3278     CheckNeedsToInformIncrementalMarker(
   3279         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
   3280     InformIncrementalMarker(masm);
   3281     regs_.Restore(masm);  // Restore the extra scratch registers we used.
   3282 
   3283     __ RememberedSetHelper(object(), address(),
   3284                            value(),  // scratch1
   3285                            save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   3286 
   3287     __ Bind(&dont_need_remembered_set);
   3288   }
   3289 
   3290   CheckNeedsToInformIncrementalMarker(
   3291       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
   3292   InformIncrementalMarker(masm);
   3293   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   3294   __ Ret();
   3295 }
   3296 
   3297 
   3298 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
   3299   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   3300   Register address =
   3301     x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
   3302   DCHECK(!address.Is(regs_.object()));
   3303   DCHECK(!address.Is(x0));
   3304   __ Mov(address, regs_.address());
   3305   __ Mov(x0, regs_.object());
   3306   __ Mov(x1, address);
   3307   __ Mov(x2, ExternalReference::isolate_address(isolate()));
   3308 
   3309   AllowExternalCallThatCantCauseGC scope(masm);
   3310   ExternalReference function =
   3311       ExternalReference::incremental_marking_record_write_function(
   3312           isolate());
   3313   __ CallCFunction(function, 3, 0);
   3314 
   3315   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
   3316 }
   3317 
   3318 
   3319 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   3320     MacroAssembler* masm,
   3321     OnNoNeedToInformIncrementalMarker on_no_need,
   3322     Mode mode) {
   3323   Label on_black;
   3324   Label need_incremental;
   3325   Label need_incremental_pop_scratch;
   3326 
   3327   Register mem_chunk = regs_.scratch0();
   3328   Register counter = regs_.scratch1();
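           // Decrement the chunk's write barrier counter; once it goes
           // negative, the incremental marker must be informed.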
   3329   __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
   3330   __ Ldr(counter,
   3331          MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
   3332   __ Subs(counter, counter, 1);
   3333   __ Str(counter,
   3334          MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
   3335   __ B(mi, &need_incremental);
   3336 
   3337   // If the object is not black we don't have to inform the incremental marker.
   3338   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
   3339 
   3340   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   3341   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   3342     __ RememberedSetHelper(object(), address(),
   3343                            value(),  // scratch1
   3344                            save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   3345   } else {
   3346     __ Ret();
   3347   }
   3348 
   3349   __ Bind(&on_black);
   3350   // Get the value from the slot.
   3351   Register val = regs_.scratch0();
   3352   __ Ldr(val, MemOperand(regs_.address()));
   3353 
   3354   if (mode == INCREMENTAL_COMPACTION) {
   3355     Label ensure_not_white;
   3356 
   3357     __ CheckPageFlagClear(val, regs_.scratch1(),
   3358                           MemoryChunk::kEvacuationCandidateMask,
   3359                           &ensure_not_white);
   3360 
   3361     __ CheckPageFlagClear(regs_.object(),
   3362                           regs_.scratch1(),
   3363                           MemoryChunk::kSkipEvacuationSlotsRecordingMask,
   3364                           &need_incremental);
   3365 
   3366     __ Bind(&ensure_not_white);
   3367   }
   3368 
   3369   // We need extra registers for this, so we push the object and the address
   3370   // register temporarily.
   3371   __ Push(regs_.address(), regs_.object());
   3372   __ JumpIfWhite(val,
   3373                  regs_.scratch1(),  // Scratch.
   3374                  regs_.object(),    // Scratch.
   3375                  regs_.address(),   // Scratch.
   3376                  regs_.scratch2(),  // Scratch.
   3377                  &need_incremental_pop_scratch);
   3378   __ Pop(regs_.object(), regs_.address());
   3379 
   3380   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   3381   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   3382     __ RememberedSetHelper(object(), address(),
   3383                            value(),  // scratch1
   3384                            save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   3385   } else {
   3386     __ Ret();
   3387   }
   3388 
   3389   __ Bind(&need_incremental_pop_scratch);
   3390   __ Pop(regs_.object(), regs_.address());
   3391 
   3392   __ Bind(&need_incremental);
   3393   // Fall through when we need to inform the incremental marker.
   3394 }
   3395 
   3396 
   3397 void RecordWriteStub::Generate(MacroAssembler* masm) {
   3398   Label skip_to_incremental_noncompacting;
   3399   Label skip_to_incremental_compacting;
   3400 
    3401   // We patch the first two instructions back and forth between a nop and a
    3402   // real branch when we start and stop incremental heap marking.
   3403   // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
   3404   // are generated.
   3405   // See RecordWriteStub::Patch for details.
   3406   {
   3407     InstructionAccurateScope scope(masm, 2);
   3408     __ adr(xzr, &skip_to_incremental_noncompacting);
   3409     __ adr(xzr, &skip_to_incremental_compacting);
   3410   }
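           // An adr into xzr discards its result, so these two instructions act
           // as nops that still carry the label references Patch needs.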
   3411 
   3412   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   3413     __ RememberedSetHelper(object(), address(),
   3414                            value(),  // scratch1
   3415                            save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   3416   }
   3417   __ Ret();
   3418 
   3419   __ Bind(&skip_to_incremental_noncompacting);
   3420   GenerateIncremental(masm, INCREMENTAL);
   3421 
   3422   __ Bind(&skip_to_incremental_compacting);
   3423   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
   3424 }
   3425 
   3426 
   3427 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   3428   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   3429   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   3430   int parameter_count_offset =
   3431       StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   3432   __ Ldr(x1, MemOperand(fp, parameter_count_offset));
   3433   if (function_mode() == JS_FUNCTION_STUB_MODE) {
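             // The count in x1 excludes the receiver, so drop one extra slot
             // for it.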
   3434     __ Add(x1, x1, 1);
   3435   }
   3436   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   3437   __ Drop(x1);
   3438   // Return to IC Miss stub, continuation still on stack.
   3439   __ Ret();
   3440 }
   3441 
   3442 
   3443 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   3444   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   3445   LoadICStub stub(isolate());
   3446   stub.GenerateForTrampoline(masm);
   3447 }
   3448 
   3449 
   3450 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   3451   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   3452   KeyedLoadICStub stub(isolate());
   3453   stub.GenerateForTrampoline(masm);
   3454 }
   3455 
   3456 
   3457 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   3458   __ EmitLoadTypeFeedbackVector(x2);
   3459   CallICStub stub(isolate(), state());
   3460   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   3461 }
   3462 
   3463 
   3464 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
   3465 
   3466 
   3467 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3468   GenerateImpl(masm, true);
   3469 }
   3470 
   3471 
   3472 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
   3473                              Register receiver_map, Register scratch1,
   3474                              Register scratch2, bool is_polymorphic,
   3475                              Label* miss) {
   3476   // feedback initially contains the feedback array
   3477   Label next_loop, prepare_next;
   3478   Label load_smi_map, compare_map;
   3479   Label start_polymorphic;
   3480 
   3481   Register cached_map = scratch1;
   3482 
   3483   __ Ldr(cached_map,
   3484          FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
   3485   __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   3486   __ Cmp(receiver_map, cached_map);
   3487   __ B(ne, &start_polymorphic);
    3488   // Found. Now call the handler.
   3489   Register handler = feedback;
   3490   __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
   3491   __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
   3492   __ Jump(feedback);
   3493 
   3494   Register length = scratch2;
   3495   __ Bind(&start_polymorphic);
   3496   __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   3497   if (!is_polymorphic) {
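             // With only one map/handler pair (length == 2), the pair already
             // failed the check above, so there is nothing left to try.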
   3498     __ Cmp(length, Operand(Smi::FromInt(2)));
   3499     __ B(eq, miss);
   3500   }
   3501 
   3502   Register too_far = length;
   3503   Register pointer_reg = feedback;
   3504 
   3505   // +-----+------+------+-----+-----+ ... ----+
   3506   // | map | len  | wm0  | h0  | wm1 |      hN |
   3507   // +-----+------+------+-----+-----+ ... ----+
   3508   //                 0      1     2        len-1
   3509   //                              ^              ^
   3510   //                              |              |
   3511   //                         pointer_reg      too_far
   3512   //                         aka feedback     scratch2
   3513   // also need receiver_map
   3514   // use cached_map (scratch1) to look in the weak map values.
   3515   __ Add(too_far, feedback,
   3516          Operand::UntagSmiAndScale(length, kPointerSizeLog2));
   3517   __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
   3518   __ Add(pointer_reg, feedback,
   3519          FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
   3520 
   3521   __ Bind(&next_loop);
   3522   __ Ldr(cached_map, MemOperand(pointer_reg));
   3523   __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   3524   __ Cmp(receiver_map, cached_map);
   3525   __ B(ne, &prepare_next);
   3526   __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
   3527   __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
   3528   __ Jump(handler);
   3529 
   3530   __ Bind(&prepare_next);
   3531   __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
   3532   __ Cmp(pointer_reg, too_far);
   3533   __ B(lt, &next_loop);
   3534 
   3535   // We exhausted our array of map handler pairs.
   3536   __ jmp(miss);
   3537 }
   3538 
   3539 
   3540 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
   3541                                   Register receiver_map, Register feedback,
   3542                                   Register vector, Register slot,
   3543                                   Register scratch, Label* compare_map,
   3544                                   Label* load_smi_map, Label* try_array) {
   3545   __ JumpIfSmi(receiver, load_smi_map);
   3546   __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
    3547   __ Bind(compare_map);
   3548   Register cached_map = scratch;
    3549   // Load the map from the weak cell into cached_map.
   3550   __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
   3551   __ Cmp(cached_map, receiver_map);
   3552   __ B(ne, try_array);
   3553 
   3554   Register handler = feedback;
   3555   __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
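           // The handler is stored in the vector slot immediately after the
           // weak cell, hence the extra kPointerSize below.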
   3556   __ Ldr(handler,
   3557          FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
   3558   __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
   3559   __ Jump(handler);
   3560 }
   3561 
   3562 
   3563 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3564   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
   3565   Register name = LoadWithVectorDescriptor::NameRegister();          // x2
   3566   Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
   3567   Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
   3568   Register feedback = x4;
   3569   Register receiver_map = x5;
   3570   Register scratch1 = x6;
   3571 
   3572   __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
   3573   __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
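           // feedback now holds the vector element for this slot: a weak cell
           // (monomorphic), a fixed array (polymorphic), or a sentinel symbol
           // (e.g. megamorphic).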
   3574 
   3575   // Try to quickly handle the monomorphic case without knowing for sure
   3576   // if we have a weak cell in feedback. We do know it's safe to look
   3577   // at WeakCell::kValueOffset.
   3578   Label try_array, load_smi_map, compare_map;
   3579   Label not_array, miss;
   3580   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3581                         scratch1, &compare_map, &load_smi_map, &try_array);
   3582 
   3583   // Is it a fixed array?
   3584   __ Bind(&try_array);
   3585   __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3586   __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
   3587   HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
   3588 
   3589   __ Bind(&not_array);
   3590   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
   3591   Code::Flags code_flags =
   3592       Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   3593   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
   3594                                                receiver, name, feedback,
   3595                                                receiver_map, scratch1, x7);
   3596 
   3597   __ Bind(&miss);
   3598   LoadIC::GenerateMiss(masm);
   3599 
   3600   __ Bind(&load_smi_map);
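           // Smi receivers are tracked with the heap number map in the
           // feedback, so use that map for the comparison.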
   3601   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3602   __ jmp(&compare_map);
   3603 }
   3604 
   3605 
   3606 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
   3607   GenerateImpl(masm, false);
   3608 }
   3609 
   3610 
   3611 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3612   GenerateImpl(masm, true);
   3613 }
   3614 
   3615 
   3616 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3617   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
   3618   Register key = LoadWithVectorDescriptor::NameRegister();           // x2
   3619   Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
   3620   Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
   3621   Register feedback = x4;
   3622   Register receiver_map = x5;
   3623   Register scratch1 = x6;
   3624 
   3625   __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
   3626   __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   3627 
   3628   // Try to quickly handle the monomorphic case without knowing for sure
   3629   // if we have a weak cell in feedback. We do know it's safe to look
   3630   // at WeakCell::kValueOffset.
   3631   Label try_array, load_smi_map, compare_map;
   3632   Label not_array, miss;
   3633   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3634                         scratch1, &compare_map, &load_smi_map, &try_array);
   3635 
   3636   __ Bind(&try_array);
   3637   // Is it a fixed array?
   3638   __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3639   __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
   3640 
   3641   // We have a polymorphic element handler.
    3642   Label try_poly_name;
   3644   HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
   3645 
   3646   __ Bind(&not_array);
   3647   // Is it generic?
   3648   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
   3649                    &try_poly_name);
   3650   Handle<Code> megamorphic_stub =
   3651       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   3652   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   3653 
   3654   __ Bind(&try_poly_name);
   3655   // We might have a name in feedback, and a fixed array in the next slot.
   3656   __ Cmp(key, feedback);
   3657   __ B(ne, &miss);
   3658   // If the name comparison succeeded, we know we have a fixed array with
   3659   // at least one map/handler pair.
   3660   __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
   3661   __ Ldr(feedback,
   3662          FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   3663   HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
   3664 
   3665   __ Bind(&miss);
   3666   KeyedLoadIC::GenerateMiss(masm);
   3667 
   3668   __ Bind(&load_smi_map);
   3669   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3670   __ jmp(&compare_map);
   3671 }
   3672 
   3673 
   3674 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   3675   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   3676   VectorStoreICStub stub(isolate(), state());
   3677   stub.GenerateForTrampoline(masm);
   3678 }
   3679 
   3680 
   3681 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   3682   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   3683   VectorKeyedStoreICStub stub(isolate(), state());
   3684   stub.GenerateForTrampoline(masm);
   3685 }
   3686 
   3687 
   3688 void VectorStoreICStub::Generate(MacroAssembler* masm) {
   3689   GenerateImpl(masm, false);
   3690 }
   3691 
   3692 
   3693 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3694   GenerateImpl(masm, true);
   3695 }
   3696 
   3697 
   3698 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3699   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // x1
   3700   Register key = VectorStoreICDescriptor::NameRegister();           // x2
   3701   Register vector = VectorStoreICDescriptor::VectorRegister();      // x3
   3702   Register slot = VectorStoreICDescriptor::SlotRegister();          // x4
   3703   DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0));          // x0
   3704   Register feedback = x5;
   3705   Register receiver_map = x6;
   3706   Register scratch1 = x7;
   3707 
   3708   __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
   3709   __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   3710 
   3711   // Try to quickly handle the monomorphic case without knowing for sure
   3712   // if we have a weak cell in feedback. We do know it's safe to look
   3713   // at WeakCell::kValueOffset.
   3714   Label try_array, load_smi_map, compare_map;
   3715   Label not_array, miss;
   3716   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3717                         scratch1, &compare_map, &load_smi_map, &try_array);
   3718 
   3719   // Is it a fixed array?
   3720   __ Bind(&try_array);
   3721   __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3722   __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
   3723   HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);
   3724 
   3725   __ Bind(&not_array);
   3726   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
   3727   Code::Flags code_flags =
   3728       Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   3729   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
   3730                                                receiver, key, feedback,
   3731                                                receiver_map, scratch1, x8);
   3732 
   3733   __ Bind(&miss);
   3734   StoreIC::GenerateMiss(masm);
   3735 
   3736   __ Bind(&load_smi_map);
   3737   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3738   __ jmp(&compare_map);
   3739 }
   3740 
   3741 
   3742 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
   3743   GenerateImpl(masm, false);
   3744 }
   3745 
   3746 
   3747 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3748   GenerateImpl(masm, true);
   3749 }
   3750 
   3751 
   3752 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
   3753                                        Register receiver_map, Register scratch1,
   3754                                        Register scratch2, Label* miss) {
   3755   // feedback initially contains the feedback array
   3756   Label next_loop, prepare_next;
   3757   Label start_polymorphic;
   3758   Label transition_call;
   3759 
   3760   Register cached_map = scratch1;
   3761   Register too_far = scratch2;
   3762   Register pointer_reg = feedback;
   3763 
   3764   __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   3765 
    3766   // +-----+------+------+-----+-----+-----+ ... ----+
    3767   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
    3768   // +-----+------+------+-----+-----+-----+ ... ----+
   3769   //                 0      1     2              len-1
   3770   //                 ^                                 ^
   3771   //                 |                                 |
   3772   //             pointer_reg                        too_far
   3773   //             aka feedback                       scratch2
   3774   // also need receiver_map
   3775   // use cached_map (scratch1) to look in the weak map values.
   3776   __ Add(too_far, feedback,
   3777          Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
   3778   __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
   3779   __ Add(pointer_reg, feedback,
   3780          FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
   3781 
   3782   __ Bind(&next_loop);
   3783   __ Ldr(cached_map, MemOperand(pointer_reg));
   3784   __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   3785   __ Cmp(receiver_map, cached_map);
   3786   __ B(ne, &prepare_next);
   3787   // Is it a transitioning store?
   3788   __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
   3789   __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
   3790   __ B(ne, &transition_call);
   3791 
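           // Not a transitioning store: the handler code object is the third
           // element of the triple.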
   3792   __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
   3793   __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
   3794   __ Jump(pointer_reg);
   3795 
   3796   __ Bind(&transition_call);
   3797   __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
   3798   __ JumpIfSmi(too_far, miss);
   3799 
   3800   __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
   3801   // Load the map into the correct register.
   3802   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
   3803   __ mov(feedback, too_far);
   3804   __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
   3805   __ Jump(receiver_map);
   3806 
   3807   __ Bind(&prepare_next);
   3808   __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
   3809   __ Cmp(pointer_reg, too_far);
   3810   __ B(lt, &next_loop);
   3811 
   3812   // We exhausted our array of map handler pairs.
   3813   __ jmp(miss);
   3814 }
   3815 
   3816 
   3817 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3818   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // x1
   3819   Register key = VectorStoreICDescriptor::NameRegister();           // x2
   3820   Register vector = VectorStoreICDescriptor::VectorRegister();      // x3
   3821   Register slot = VectorStoreICDescriptor::SlotRegister();          // x4
   3822   DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0));          // x0
   3823   Register feedback = x5;
   3824   Register receiver_map = x6;
   3825   Register scratch1 = x7;
   3826 
   3827   __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
   3828   __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
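          // For example, with slot == Smi(2) this computes
          // feedback = vector + (2 << kPointerSizeLog2) and then loads the
          // element at FixedArray::kHeaderSize, i.e. vector->get(2).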
   3829 
   3830   // Try to quickly handle the monomorphic case without knowing for sure
   3831   // if we have a weak cell in feedback. We do know it's safe to look
   3832   // at WeakCell::kValueOffset.
   3833   Label try_array, load_smi_map, compare_map;
   3834   Label not_array, miss;
   3835   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3836                         scratch1, &compare_map, &load_smi_map, &try_array);
   3837 
   3838   __ Bind(&try_array);
   3839   // Is it a fixed array?
   3840   __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3841   __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
   3842 
   3843   // We have a polymorphic element handler.
   3844   Label try_poly_name;
   3845   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
   3846 
   3847   __ Bind(&not_array);
   3848   // Is it generic?
   3849   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
   3850                    &try_poly_name);
   3851   Handle<Code> megamorphic_stub =
   3852       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   3853   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   3854 
   3855   __ Bind(&try_poly_name);
   3856   // We might have a name in feedback, and a fixed array in the next slot.
   3857   __ Cmp(key, feedback);
   3858   __ B(ne, &miss);
   3859   // If the name comparison succeeded, we know we have a fixed array with
   3860   // at least one map/handler pair.
   3861   __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
   3862   __ Ldr(feedback,
   3863          FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   3864   HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
   3865 
   3866   __ Bind(&miss);
   3867   KeyedStoreIC::GenerateMiss(masm);
   3868 
   3869   __ Bind(&load_smi_map);
   3870   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3871   __ jmp(&compare_map);
   3872 }
   3873 
   3874 
   3875 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
   3876 // a "Push lr" instruction, followed by a call.
   3877 static const unsigned int kProfileEntryHookCallSize =
   3878     Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
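        // On A64 every instruction is kInstructionSize (4) bytes, so the two
        // extra instructions add 8 bytes on top of the relocated call.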
   3879 
   3880 
   3881 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   3882   if (masm->isolate()->function_entry_hook() != NULL) {
   3883     ProfileEntryHookStub stub(masm->isolate());
   3884     Assembler::BlockConstPoolScope no_const_pools(masm);
   3885     DontEmitDebugCodeScope no_debug_code(masm);
   3886     Label entry_hook_call_start;
   3887     __ Bind(&entry_hook_call_start);
   3888     __ Push(lr);
   3889     __ CallStub(&stub);
   3890     DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
   3891            kProfileEntryHookCallSize);
   3892 
   3893     __ Pop(lr);
   3894   }
   3895 }
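        // A minimal sketch of an embedder-side hook, assuming the
        // v8::FunctionEntryHook signature from v8.h (an illustration, not code
        // from this file):
        //
        //   void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
        //     // Record 'function' for profiling; both values are raw
        //     // addresses, valid only while the hook is running.
        //   }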
   3896 
   3897 
   3898 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   3899   MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
   3900 
   3901   // Save all kCallerSaved registers (including lr), since this can be called
   3902   // from anywhere.
   3903   // TODO(jbramley): What about FP registers?
   3904   __ PushCPURegList(kCallerSaved);
   3905   DCHECK(kCallerSaved.IncludesAliasOf(lr));
   3906   const int kNumSavedRegs = kCallerSaved.Count();
   3907 
   3908   // Compute the function's address as the first argument.
   3909   __ Sub(x0, lr, kProfileEntryHookCallSize);
   3910 
   3911 #if V8_HOST_ARCH_ARM64
   3912   uintptr_t entry_hook =
   3913       reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
   3914   __ Mov(x10, entry_hook);
   3915 #else
   3916   // Under the simulator we need to indirect the entry hook through a trampoline
   3917   // function at a known address.
   3918   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   3919   __ Mov(x10, Operand(ExternalReference(&dispatcher,
   3920                                         ExternalReference::BUILTIN_CALL,
   3921                                         isolate())));
   3922   // The trampoline additionally takes the isolate as a third parameter.
   3923   __ Mov(x2, ExternalReference::isolate_address(isolate()));
   3924 #endif
   3925 
   3926   // The caller's return address is above the saved temporaries.
   3927   // Grab its location for the second argument to the hook.
   3928   __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
   3929 
   3930   {
   3931     // Create a dummy frame, as CallCFunction requires this.
   3932     FrameScope frame(masm, StackFrame::MANUAL);
   3933     __ CallCFunction(x10, 2, 0);
   3934   }
   3935 
   3936   __ PopCPURegList(kCallerSaved);
   3937   __ Ret();
   3938 }
   3939 
   3940 
   3941 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   3942   // When calling into C++ code the stack pointer must be csp.
   3943   // Therefore this code must use csp for peek/poke operations when the
   3944   // stub is generated. When the stub is called
   3945   // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
   3946   // and configure the stack pointer *before* doing the call.
   3947   const Register old_stack_pointer = __ StackPointer();
   3948   __ SetStackPointer(csp);
   3949 
   3950   // Put return address on the stack (accessible to GC through exit frame pc).
   3951   __ Poke(lr, 0);
   3952   // Call the C++ function.
   3953   __ Blr(x10);
   3954   // Return to calling code.
   3955   __ Peek(lr, 0);
   3956   __ AssertFPCRState();
   3957   __ Ret();
   3958 
   3959   __ SetStackPointer(old_stack_pointer);
   3960 }
   3961 
   3962 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   3963                                     Register target) {
   3964   // Make sure the caller configured the stack pointer (see comment in
   3965   // DirectCEntryStub::Generate).
   3966   DCHECK(csp.Is(__ StackPointer()));
   3967 
   3968   intptr_t code =
   3969       reinterpret_cast<intptr_t>(GetCode().location());
   3970   __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
   3971   __ Mov(x10, target);
   3972   // Branch to the stub.
   3973   __ Blr(lr);
   3974 }
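        // Note the trick above: Blr(lr) reads the stub's address out of lr
        // and, as a side effect, leaves the real return address in lr. The
        // stub then spills lr to the exit frame (Poke) so the GC can see --
        // and, if the calling code moves, update -- the return address while
        // C++ code is running.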
   3975 
   3976 
   3977 // Probe the name dictionary in the 'elements' register.
   3978 // Jump to the 'done' label if a property with the given name is found.
   3979 // Jump to the 'miss' label otherwise.
   3980 //
   3981 // If the lookup was successful, 'scratch2' will be equal to elements + kPointerSize * index.
   3982 // 'elements' and 'name' registers are preserved on miss.
   3983 void NameDictionaryLookupStub::GeneratePositiveLookup(
   3984     MacroAssembler* masm,
   3985     Label* miss,
   3986     Label* done,
   3987     Register elements,
   3988     Register name,
   3989     Register scratch1,
   3990     Register scratch2) {
   3991   DCHECK(!AreAliased(elements, name, scratch1, scratch2));
   3992 
   3993   // Assert that name contains a string.
   3994   __ AssertName(name);
   3995 
   3996   // Compute the capacity mask.
   3997   __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
   3998   __ Sub(scratch1, scratch1, 1);
   3999 
   4000   // Generate an unrolled loop that performs a few probes before giving up.
   4001   for (int i = 0; i < kInlinedProbes; i++) {
   4002     // Compute the masked index: (hash + i + i * i) & mask.
   4003     __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
   4004     if (i > 0) {
   4005       // Add the probe offset (i + i * i) left shifted to avoid right shifting
   4006       // the hash in a separate instruction. The value hash + i + i * i is right
   4007       // shifted in the following And instruction.
   4008       DCHECK(NameDictionary::GetProbeOffset(i) <
   4009           1 << (32 - Name::kHashShift));
   4010       __ Add(scratch2, scratch2, Operand(
   4011           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   4012     }
   4013     __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
   4014 
   4015     // Scale the index by multiplying by the element size.
   4016     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   4017     __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
   4018 
   4019     // Check if the key is identical to the name.
   4020     UseScratchRegisterScope temps(masm);
   4021     Register scratch3 = temps.AcquireX();
   4022     __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
   4023     __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
   4024     __ Cmp(name, scratch3);
   4025     __ B(eq, done);
   4026   }
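          // Worked example of the masked-index formula above: with a
          // post-shift hash of 7 and mask == 15, the probe offsets i + i * i
          // for i = 0, 1, 2, 3 are 0, 2, 6 and 12, giving masked indices
          // 7, 9, 13 and 3; each index is then tripled because an entry
          // occupies kEntrySize == 3 slots.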
   4027 
   4028   // The inlined probes didn't find the entry.
   4029   // Call the complete stub to scan the whole dictionary.
   4030 
   4031   CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
   4032   spill_list.Combine(lr);
   4033   spill_list.Remove(scratch1);
   4034   spill_list.Remove(scratch2);
   4035 
   4036   __ PushCPURegList(spill_list);
   4037 
   4038   if (name.is(x0)) {
   4039     DCHECK(!elements.is(x1));
   4040     __ Mov(x1, name);
   4041     __ Mov(x0, elements);
   4042   } else {
   4043     __ Mov(x0, elements);
   4044     __ Mov(x1, name);
   4045   }
   4046 
   4047   Label not_found;
   4048   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
   4049   __ CallStub(&stub);
   4050   __ Cbz(x0, &not_found);
   4051   __ Mov(scratch2, x2);  // Move entry index into scratch2.
   4052   __ PopCPURegList(spill_list);
   4053   __ B(done);
   4054 
   4055   __ Bind(&not_found);
   4056   __ PopCPURegList(spill_list);
   4057   __ B(miss);
   4058 }
   4059 
   4060 
   4061 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
   4062                                                       Label* miss,
   4063                                                       Label* done,
   4064                                                       Register receiver,
   4065                                                       Register properties,
   4066                                                       Handle<Name> name,
   4067                                                       Register scratch0) {
   4068   DCHECK(!AreAliased(receiver, properties, scratch0));
   4069   DCHECK(name->IsUniqueName());
   4070   // If the names in slots 1 to kProbes - 1 of the probe sequence for the
   4071   // hash value are not equal to the name, and the kProbes-th slot is not
   4072   // used (its name is the undefined value), the hash table is guaranteed
   4073   // not to contain the property. This holds even if some slots represent
   4074   // deleted properties (their names are the hole value).
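          // Concretely: a probe that hits an undefined slot proves the name is
          // absent (jump to 'done'); a slot holding the hole (a deleted
          // property) is skipped; a non-unique entry name forces the 'miss'
          // path.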
   4075   for (int i = 0; i < kInlinedProbes; i++) {
   4076     // scratch0 points to properties hash.
   4077     // Compute the masked index: (hash + i + i * i) & mask.
   4078     Register index = scratch0;
   4079     // Capacity is a smi and is always a power of two.
   4080     __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
   4081     __ Sub(index, index, 1);
   4082     __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
   4083 
   4084     // Scale the index by multiplying by the entry size.
   4085     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   4086     __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
   4087 
   4088     Register entity_name = scratch0;
   4089     // Having undefined at this place means the name is not contained.
   4090     Register tmp = index;
   4091     __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
   4092     __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   4093 
   4094     __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
   4095 
   4096     // Stop if found the property.
   4097     __ Cmp(entity_name, Operand(name));
   4098     __ B(eq, miss);
   4099 
   4100     Label good;
   4101     __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
   4102 
   4103     // Check if the entry name is not a unique name.
   4104     __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   4105     __ Ldrb(entity_name,
   4106             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   4107     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
   4108     __ Bind(&good);
   4109   }
   4110 
   4111   CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
   4112   spill_list.Combine(lr);
   4113   spill_list.Remove(scratch0);  // Scratch registers don't need to be preserved.
   4114 
   4115   __ PushCPURegList(spill_list);
   4116 
   4117   __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   4118   __ Mov(x1, Operand(name));
   4119   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   4120   __ CallStub(&stub);
   4121   // Move stub return value to scratch0. Note that scratch0 is not included in
   4122   // spill_list and won't be clobbered by PopCPURegList.
   4123   __ Mov(scratch0, x0);
   4124   __ PopCPURegList(spill_list);
   4125 
   4126   __ Cbz(scratch0, done);
   4127   __ B(miss);
   4128 }
   4129 
   4130 
   4131 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   4132   // This stub overrides SometimesSetsUpAFrame() to return false. That means
   4133   // we cannot call anything that could cause a GC from this stub.
   4134   //
   4135   // Arguments are in x0 and x1:
   4136   //   x0: property dictionary.
   4137   //   x1: the name of the property we are looking for.
   4138   //
   4139   // The return value is in x0: zero if the lookup failed, non-zero otherwise.
   4140   // If the lookup is successful, x2 will contain the index of the entry.
   4141 
   4142   Register result = x0;
   4143   Register dictionary = x0;
   4144   Register key = x1;
   4145   Register index = x2;
   4146   Register mask = x3;
   4147   Register hash = x4;
   4148   Register undefined = x5;
   4149   Register entry_key = x6;
   4150 
   4151   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   4152 
   4153   __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
   4154   __ Sub(mask, mask, 1);
   4155 
   4156   __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
   4157   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   4158 
   4159   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   4160     // Compute the masked index: (hash + i + i * i) & mask.
   4161     // Capacity is a smi and is always a power of two.
   4162     if (i > 0) {
   4163       // Add the probe offset (i + i * i) left shifted to avoid right shifting
   4164       // the hash in a separate instruction. The value hash + i + i * i is right
   4165       // shifted in the following And instruction.
   4166       DCHECK(NameDictionary::GetProbeOffset(i) <
   4167              1 << (32 - Name::kHashShift));
   4168       __ Add(index, hash,
   4169              NameDictionary::GetProbeOffset(i) << Name::kHashShift);
   4170     } else {
   4171       __ Mov(index, hash);
   4172     }
   4173     __ And(index, mask, Operand(index, LSR, Name::kHashShift));
   4174 
   4175     // Scale the index by multiplying by the entry size.
   4176     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   4177     __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
   4178 
   4179     __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
   4180     __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
   4181 
   4182     // Having undefined at this place means the name is not contained.
   4183     __ Cmp(entry_key, undefined);
   4184     __ B(eq, &not_in_dictionary);
   4185 
   4186     // Stop if found the property.
   4187     __ Cmp(entry_key, key);
   4188     __ B(eq, &in_dictionary);
   4189 
   4190     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
   4191       // Check if the entry name is not a unique name.
   4192       __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   4193       __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   4194       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
   4195     }
   4196   }
   4197 
   4198   __ Bind(&maybe_in_dictionary);
   4199   // If we are doing negative lookup then probing failure should be
   4200   // treated as a lookup success. For positive lookup, probing failure
   4201   // should be treated as lookup failure.
   4202   if (mode() == POSITIVE_LOOKUP) {
   4203     __ Mov(result, 0);
   4204     __ Ret();
   4205   }
   4206 
   4207   __ Bind(&in_dictionary);
   4208   __ Mov(result, 1);
   4209   __ Ret();
   4210 
   4211   __ Bind(&not_in_dictionary);
   4212   __ Mov(result, 0);
   4213   __ Ret();
   4214 }
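        // A minimal C++ sketch of the probe loop above (illustrative only;
        // 'elements', 'undefined' and probe_offset() are simplifications of
        // the real heap layout and of NameDictionary::GetProbeOffset):
        //
        //   int mask = capacity - 1;  // capacity is a power of two
        //   for (int i = 0; i < kTotalProbes; i++) {
        //     int entry = (hash + probe_offset(i)) & mask;
        //     Object* key = elements[kElementsStartIndex + entry * 3];
        //     if (key == undefined) return kNotFound;  // absent for sure
        //     if (key == name) return entry;           // found
        //   }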
   4215 
   4216 
   4217 template<class T>
   4218 static void CreateArrayDispatch(MacroAssembler* masm,
   4219                                 AllocationSiteOverrideMode mode) {
   4220   ASM_LOCATION("CreateArrayDispatch");
   4221   if (mode == DISABLE_ALLOCATION_SITES) {
   4222     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
   4223     __ TailCallStub(&stub);
   4224 
   4225   } else if (mode == DONT_OVERRIDE) {
   4226     Register kind = x3;
   4227     int last_index =
   4228         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   4229     for (int i = 0; i <= last_index; ++i) {
   4230       Label next;
   4231       ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
   4232       // TODO(jbramley): Is this the best way to handle this? Can we make the
   4233       // tail calls conditional, rather than hopping over each one?
   4234       __ CompareAndBranch(kind, candidate_kind, ne, &next);
   4235       T stub(masm->isolate(), candidate_kind);
   4236       __ TailCallStub(&stub);
   4237       __ Bind(&next);
   4238     }
   4239 
   4240     // If we reached this point there is a problem.
   4241     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4242 
   4243   } else {
   4244     UNREACHABLE();
   4245   }
   4246 }
   4247 
   4248 
   4249 // TODO(jbramley): If this needs to be a special case, make it a proper template
   4250 // specialization, and not a separate function.
   4251 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   4252                                            AllocationSiteOverrideMode mode) {
   4253   ASM_LOCATION("CreateArrayDispatchOneArgument");
   4254   // x0 - argc
   4255   // x1 - constructor?
   4256   // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
   4257   // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
   4258   // sp[0] - last argument
   4259 
   4260   Register allocation_site = x2;
   4261   Register kind = x3;
   4262 
   4263   Label normal_sequence;
   4264   if (mode == DONT_OVERRIDE) {
   4265     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   4266     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   4267     STATIC_ASSERT(FAST_ELEMENTS == 2);
   4268     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   4269     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
   4270     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
   4271 
   4272     // Is the low bit set? If so, the array is holey.
   4273     __ Tbnz(kind, 0, &normal_sequence);
   4274   }
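          // Per the STATIC_ASSERTs above, holey == packed | 1 for each pair,
          // e.g. FAST_ELEMENTS (2) -> FAST_HOLEY_ELEMENTS (3), so testing
          // bit 0 suffices to detect an already-holey kind.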
   4275 
   4276   // Look at the last argument.
   4277   // TODO(jbramley): What does a 0 argument represent?
   4278   __ Peek(x10, 0);
   4279   __ Cbz(x10, &normal_sequence);
   4280 
   4281   if (mode == DISABLE_ALLOCATION_SITES) {
   4282     ElementsKind initial = GetInitialFastElementsKind();
   4283     ElementsKind holey_initial = GetHoleyElementsKind(initial);
   4284 
   4285     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
   4286                                                   holey_initial,
   4287                                                   DISABLE_ALLOCATION_SITES);
   4288     __ TailCallStub(&stub_holey);
   4289 
   4290     __ Bind(&normal_sequence);
   4291     ArraySingleArgumentConstructorStub stub(masm->isolate(),
   4292                                             initial,
   4293                                             DISABLE_ALLOCATION_SITES);
   4294     __ TailCallStub(&stub);
   4295   } else if (mode == DONT_OVERRIDE) {
   4296     // We are going to create a holey array, but our kind is non-holey.
   4297     // Fix kind and retry (only if we have an allocation site in the slot).
   4298     __ Orr(kind, kind, 1);
   4299 
   4300     if (FLAG_debug_code) {
   4301       __ Ldr(x10, FieldMemOperand(allocation_site, 0));
   4302       __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
   4303                        &normal_sequence);
   4304       __ Assert(eq, kExpectedAllocationSite);
   4305     }
   4306 
   4307     // Save the resulting elements kind in type info. We can't just store 'kind'
   4308     // in the AllocationSite::transition_info field because elements kind is
   4309     // restricted to a portion of the field; upper bits need to be left alone.
   4310     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   4311     __ Ldr(x11, FieldMemOperand(allocation_site,
   4312                                 AllocationSite::kTransitionInfoOffset));
   4313     __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
   4314     __ Str(x11, FieldMemOperand(allocation_site,
   4315                                 AllocationSite::kTransitionInfoOffset));
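            // Since the kind enumeration above pairs each packed kind with its
            // holey variant at the next value, adding the Smi-tagged
            // kFastElementsKindPackedToHoley delta nudges only the
            // elements-kind bits of transition_info and leaves the upper bits
            // intact.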
   4316 
   4317     __ Bind(&normal_sequence);
   4318     int last_index =
   4319         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   4320     for (int i = 0; i <= last_index; ++i) {
   4321       Label next;
   4322       ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
   4323       __ CompareAndBranch(kind, candidate_kind, ne, &next);
   4324       ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
   4325       __ TailCallStub(&stub);
   4326       __ Bind(&next);
   4327     }
   4328 
   4329     // If we reached this point there is a problem.
   4330     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4331   } else {
   4332     UNREACHABLE();
   4333   }
   4334 }
   4335 
   4336 
   4337 template<class T>
   4338 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
   4339   int to_index = GetSequenceIndexFromFastElementsKind(
   4340       TERMINAL_FAST_ELEMENTS_KIND);
   4341   for (int i = 0; i <= to_index; ++i) {
   4342     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4343     T stub(isolate, kind);
   4344     stub.GetCode();
   4345     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
   4346       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
   4347       stub1.GetCode();
   4348     }
   4349   }
   4350 }
   4351 
   4352 void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   4353   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
   4354       isolate);
   4355   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
   4356       isolate);
   4357   ArrayNArgumentsConstructorStub stub(isolate);
   4358   stub.GetCode();
   4359   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   4360   for (int i = 0; i < 2; i++) {
   4361     // For internal arrays we only need a few stub variants.
   4362     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
   4363     stubh1.GetCode();
   4364     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
   4365     stubh2.GetCode();
   4366   }
   4367 }
   4368 
   4369 
   4370 void ArrayConstructorStub::GenerateDispatchToArrayStub(
   4371     MacroAssembler* masm,
   4372     AllocationSiteOverrideMode mode) {
   4373   Register argc = x0;
   4374   if (argument_count() == ANY) {
   4375     Label zero_case, n_case;
   4376     __ Cbz(argc, &zero_case);
   4377     __ Cmp(argc, 1);
   4378     __ B(ne, &n_case);
   4379 
   4380     // One argument.
   4381     CreateArrayDispatchOneArgument(masm, mode);
   4382 
   4383     __ Bind(&zero_case);
   4384     // No arguments.
   4385     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   4386 
   4387     __ Bind(&n_case);
   4388     // N arguments.
   4389     ArrayNArgumentsConstructorStub stub(masm->isolate());
   4390     __ TailCallStub(&stub);
   4391   } else if (argument_count() == NONE) {
   4392     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   4393   } else if (argument_count() == ONE) {
   4394     CreateArrayDispatchOneArgument(masm, mode);
   4395   } else if (argument_count() == MORE_THAN_ONE) {
   4396     ArrayNArgumentsConstructorStub stub(masm->isolate());
   4397     __ TailCallStub(&stub);
   4398   } else {
   4399     UNREACHABLE();
   4400   }
   4401 }
   4402 
   4403 
   4404 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   4405   ASM_LOCATION("ArrayConstructorStub::Generate");
   4406   // ----------- S t a t e -------------
   4407   //  -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
   4408   //  -- x1 : constructor
   4409   //  -- x2 : AllocationSite or undefined
   4410   //  -- x3 : new target
   4411   //  -- sp[0] : last argument
   4412   // -----------------------------------
   4413   Register constructor = x1;
   4414   Register allocation_site = x2;
   4415   Register new_target = x3;
   4416 
   4417   if (FLAG_debug_code) {
   4418     // The array construct code is only set for the global and natives
   4419     // builtin Array functions, which always have maps.
   4420 
   4421     Label unexpected_map, map_ok;
   4422     // Initial map for the builtin Array function should be a map.
   4423     __ Ldr(x10, FieldMemOperand(constructor,
   4424                                 JSFunction::kPrototypeOrInitialMapOffset));
   4425     // A Smi check detects both a NULL pointer and a Smi.
   4426     __ JumpIfSmi(x10, &unexpected_map);
   4427     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
   4428     __ Bind(&unexpected_map);
   4429     __ Abort(kUnexpectedInitialMapForArrayFunction);
   4430     __ Bind(&map_ok);
   4431 
   4432     // We should either have undefined in the allocation_site register or a
   4433     // valid AllocationSite.
   4434     __ AssertUndefinedOrAllocationSite(allocation_site, x10);
   4435   }
   4436 
   4437   // Enter the context of the Array function.
   4438   __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
   4439 
   4440   Label subclassing;
   4441   __ Cmp(new_target, constructor);
   4442   __ B(ne, &subclassing);
   4443 
   4444   Register kind = x3;
   4445   Label no_info;
   4446   // Get the elements kind and case on that.
   4447   __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
   4448 
   4449   __ Ldrsw(kind,
   4450            UntagSmiFieldMemOperand(allocation_site,
   4451                                    AllocationSite::kTransitionInfoOffset));
   4452   __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
   4453   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
   4454 
   4455   __ Bind(&no_info);
   4456   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
   4457 
   4458   // Subclassing support.
   4459   __ Bind(&subclassing);
   4460   switch (argument_count()) {
   4461     case ANY:
   4462     case MORE_THAN_ONE:
   4463       __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
   4464       __ Add(x0, x0, Operand(3));
   4465       break;
   4466     case NONE:
   4467       __ Poke(constructor, 0 * kPointerSize);
   4468       __ Mov(x0, Operand(3));
   4469       break;
   4470     case ONE:
   4471       __ Poke(constructor, 1 * kPointerSize);
   4472       __ Mov(x0, Operand(4));
   4473       break;
   4474   }
   4475   __ Push(new_target, allocation_site);
   4476   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
   4477 }
   4478 
   4479 
   4480 void InternalArrayConstructorStub::GenerateCase(
   4481     MacroAssembler* masm, ElementsKind kind) {
   4482   Label zero_case, n_case;
   4483   Register argc = x0;
   4484 
   4485   __ Cbz(argc, &zero_case);
   4486   __ CompareAndBranch(argc, 1, ne, &n_case);
   4487 
   4488   // One argument.
   4489   if (IsFastPackedElementsKind(kind)) {
   4490     Label packed_case;
   4491 
   4492     // We might need to create a holey array; look at the first argument.
   4493     __ Peek(x10, 0);
   4494     __ Cbz(x10, &packed_case);
   4495 
   4496     InternalArraySingleArgumentConstructorStub
   4497         stub1_holey(isolate(), GetHoleyElementsKind(kind));
   4498     __ TailCallStub(&stub1_holey);
   4499 
   4500     __ Bind(&packed_case);
   4501   }
   4502   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
   4503   __ TailCallStub(&stub1);
   4504 
   4505   __ Bind(&zero_case);
   4506   // No arguments.
   4507   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   4508   __ TailCallStub(&stub0);
   4509 
   4510   __ Bind(&n_case);
   4511   // N arguments.
   4512   ArrayNArgumentsConstructorStub stubN(isolate());
   4513   __ TailCallStub(&stubN);
   4514 }
   4515 
   4516 
   4517 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   4518   // ----------- S t a t e -------------
   4519   //  -- x0 : argc
   4520   //  -- x1 : constructor
   4521   //  -- sp[0] : return address
   4522   //  -- sp[4] : last argument
   4523   // -----------------------------------
   4524 
   4525   Register constructor = x1;
   4526 
   4527   if (FLAG_debug_code) {
   4528     // The array construct code is only set for the global and natives
   4529     // builtin Array functions, which always have maps.
   4530 
   4531     Label unexpected_map, map_ok;
   4532     // Initial map for the builtin Array function should be a map.
   4533     __ Ldr(x10, FieldMemOperand(constructor,
   4534                                 JSFunction::kPrototypeOrInitialMapOffset));
   4535     // A Smi check detects both a NULL pointer and a Smi.
   4536     __ JumpIfSmi(x10, &unexpected_map);
   4537     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
   4538     __ Bind(&unexpected_map);
   4539     __ Abort(kUnexpectedInitialMapForArrayFunction);
   4540     __ Bind(&map_ok);
   4541   }
   4542 
   4543   Register kind = w3;
   4544   // Figure out the right elements kind
   4545   __ Ldr(x10, FieldMemOperand(constructor,
   4546                               JSFunction::kPrototypeOrInitialMapOffset));
   4547 
   4548   // Retrieve elements_kind from map.
   4549   __ LoadElementsKindFromMap(kind, x10);
   4550 
   4551   if (FLAG_debug_code) {
   4552     // The elements kind was loaded into w3; comparing x3 uses the
   4552     // zero-extended value.
   4553     __ Cmp(x3, FAST_ELEMENTS);
   4554     __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
   4555     __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
   4556   }
   4557 
   4558   Label fast_elements_case;
   4559   __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
   4560   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
   4561 
   4562   __ Bind(&fast_elements_case);
   4563   GenerateCase(masm, FAST_ELEMENTS);
   4564 }
   4565 
   4566 
   4567 void FastNewObjectStub::Generate(MacroAssembler* masm) {
   4568   // ----------- S t a t e -------------
   4569   //  -- x1 : target
   4570   //  -- x3 : new target
   4571   //  -- cp : context
   4572   //  -- lr : return address
   4573   // -----------------------------------
   4574   __ AssertFunction(x1);
   4575   __ AssertReceiver(x3);
   4576 
   4577   // Verify that the new target is a JSFunction.
   4578   Label new_object;
   4579   __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
   4580 
   4581   // Load the initial map and verify that it's in fact a map.
   4582   __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
   4583   __ JumpIfSmi(x2, &new_object);
   4584   __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
   4585 
   4586   // Fall back to runtime if the target differs from the new target's
   4587   // initial map constructor.
   4588   __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
   4589   __ CompareAndBranch(x0, x1, ne, &new_object);
   4590 
   4591   // Allocate the JSObject on the heap.
   4592   Label allocate, done_allocate;
   4593   __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
   4594   __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
   4595   __ Bind(&done_allocate);
   4596 
   4597   // Initialize the JSObject fields.
   4598   STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
   4599   __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
   4600   __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
   4601   STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
   4602   STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
   4603   __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
   4604   __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
   4605   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
   4606   __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
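          // Resulting layout with 8-byte pointers: map at offset 0, properties
          // at 8, elements at 16; the in-object fields start at
          // JSObject::kHeaderSize (24), which is where x1 now points.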
   4607 
   4608   // ----------- S t a t e -------------
   4609   //  -- x0 : result (tagged)
   4610   //  -- x1 : result fields (untagged)
   4611   //  -- x5 : result end (untagged)
   4612   //  -- x2 : initial map
   4613   //  -- cp : context
   4614   //  -- lr : return address
   4615   // -----------------------------------
   4616 
   4617   // Perform in-object slack tracking if requested.
   4618   Label slack_tracking;
   4619   STATIC_ASSERT(Map::kNoSlackTracking == 0);
   4620   __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
   4621   __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
   4622   __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
   4623                            &slack_tracking);
   4624   {
   4625     // Initialize all in-object fields with undefined.
   4626     __ InitializeFieldsWithFiller(x1, x5, x6);
   4627     __ Ret();
   4628   }
   4629   __ Bind(&slack_tracking);
   4630   {
   4631     // Decrease generous allocation count.
   4632     STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
   4633     __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
   4634     __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
   4635 
   4636     // Initialize the in-object fields with undefined.
   4637     __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
   4638     __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
   4639     __ InitializeFieldsWithFiller(x1, x4, x6);
   4640 
   4641     // Initialize the remaining (reserved) fields with one pointer filler map.
   4642     __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
   4643     __ InitializeFieldsWithFiller(x1, x5, x6);
   4644 
   4645     // Check if we can finalize the instance size.
   4646     Label finalize;
   4647     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
   4648     __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
   4649     __ Ret();
   4650 
   4651     // Finalize the instance size.
   4652     __ Bind(&finalize);
   4653     {
   4654       FrameScope scope(masm, StackFrame::INTERNAL);
   4655       __ Push(x0, x2);
   4656       __ CallRuntime(Runtime::kFinalizeInstanceSize);
   4657       __ Pop(x0);
   4658     }
   4659     __ Ret();
   4660   }
   4661 
   4662   // Fall back to %AllocateInNewSpace.
   4663   __ Bind(&allocate);
   4664   {
   4665     FrameScope scope(masm, StackFrame::INTERNAL);
   4666     STATIC_ASSERT(kSmiTag == 0);
   4667     STATIC_ASSERT(kSmiTagSize == 1);
   4668     __ Mov(x4,
   4669            Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
   4670     __ Push(x2, x4);
   4671     __ CallRuntime(Runtime::kAllocateInNewSpace);
   4672     __ Pop(x2);
   4673   }
   4674   __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
   4675   __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
   4676   STATIC_ASSERT(kHeapObjectTag == 1);
   4677   __ Sub(x5, x5, kHeapObjectTag);  // Subtract the tag from end.
   4678   __ B(&done_allocate);
   4679 
   4680   // Fall back to %NewObject.
   4681   __ Bind(&new_object);
   4682   __ Push(x1, x3);
   4683   __ TailCallRuntime(Runtime::kNewObject);
   4684 }
   4685 
   4686 
   4687 void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
   4688   // ----------- S t a t e -------------
   4689   //  -- x1 : function
   4690   //  -- cp : context
   4691   //  -- fp : frame pointer
   4692   //  -- lr : return address
   4693   // -----------------------------------
   4694   __ AssertFunction(x1);
   4695 
   4696   // Make x2 point to the JavaScript frame.
   4697   __ Mov(x2, fp);
   4698   if (skip_stub_frame()) {
   4699     // For Ignition we need to skip the handler/stub frame to reach the
   4700     // JavaScript frame for the function.
   4701     __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
   4702   }
   4703   if (FLAG_debug_code) {
   4704     Label ok;
   4705     __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
   4706     __ Cmp(x3, x1);
   4707     __ B(eq, &ok);
   4708     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
   4709     __ Bind(&ok);
   4710   }
   4711 
   4712   // Check if we have rest parameters (only possible if we have an
   4713   // arguments adaptor frame below the function frame).
   4714   Label no_rest_parameters;
   4715   __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
   4716   __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
   4717   __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   4718   __ B(ne, &no_rest_parameters);
   4719 
   4720   // Check if the arguments adaptor frame contains more arguments than
   4721   // specified by the function's internal formal parameter count.
   4722   Label rest_parameters;
   4723   __ Ldrsw(x0, UntagSmiMemOperand(
   4724                    x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
   4725   __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   4726   __ Ldrsw(
   4727       x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
   4728   __ Subs(x0, x0, x3);
   4729   __ B(gt, &rest_parameters);
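          // Example: for 'function f(a, b)' called with 5 actual arguments,
          // the adaptor frame length is 5 and the formal count is 2, so
          // x0 = 3 rest parameters and we take the rest_parameters path.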
   4730 
   4731   // Return an empty rest parameter array.
   4732   __ Bind(&no_rest_parameters);
   4733   {
   4734     // ----------- S t a t e -------------
   4735     //  -- cp : context
   4736     //  -- lr : return address
   4737     // -----------------------------------
   4738 
   4739     // Allocate an empty rest parameter array.
   4740     Label allocate, done_allocate;
   4741     __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
   4742     __ Bind(&done_allocate);
   4743 
   4744     // Setup the rest parameter array in x0.
   4745     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
   4746     __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
   4747     __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
   4748     __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
   4749     __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
   4750     __ Mov(x1, Smi::FromInt(0));
   4751     __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
   4752     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
   4753     __ Ret();
   4754 
   4755     // Fall back to %AllocateInNewSpace.
   4756     __ Bind(&allocate);
   4757     {
   4758       FrameScope scope(masm, StackFrame::INTERNAL);
   4759       __ Push(Smi::FromInt(JSArray::kSize));
   4760       __ CallRuntime(Runtime::kAllocateInNewSpace);
   4761     }
   4762     __ B(&done_allocate);
   4763   }
   4764 
   4765   __ Bind(&rest_parameters);
   4766   {
   4767     // Compute the pointer to the first rest parameter (skipping the receiver).
   4768     __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
   4769     __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
   4770 
   4771     // ----------- S t a t e -------------
   4772     //  -- cp : context
   4773     //  -- x0 : number of rest parameters
   4774     //  -- x1 : function
   4775     //  -- x2 : pointer to first rest parameters
   4776     //  -- lr : return address
   4777     // -----------------------------------
   4778 
   4779     // Allocate space for the rest parameter array plus the backing store.
   4780     Label allocate, done_allocate;
   4781     __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
   4782     __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
   4783     __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
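            // For n rest parameters this allocates
            // JSArray::kSize + FixedArray::kHeaderSize + n * kPointerSize
            // bytes: a JSArray header plus a backing store of n elements.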
   4784     __ Bind(&done_allocate);
   4785 
   4786     // Compute arguments.length in x6.
   4787     __ SmiTag(x6, x0);
   4788 
   4789     // Setup the elements array in x3.
   4790     __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
   4791     __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
   4792     __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
   4793     __ Add(x4, x3, FixedArray::kHeaderSize);
   4794     {
   4795       Label loop, done_loop;
   4796       __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
   4797       __ Bind(&loop);
   4798       __ Cmp(x4, x0);
   4799       __ B(eq, &done_loop);
   4800       __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
   4801       __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
   4802       __ Sub(x2, x2, Operand(1 * kPointerSize));
   4803       __ Add(x4, x4, Operand(1 * kPointerSize));
   4804       __ B(&loop);
   4805       __ Bind(&done_loop);
   4806     }
   4807 
   4808     // Setup the rest parameter array in x0.
   4809     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
   4810     __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
   4811     __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
   4812     __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
   4813     __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
   4814     __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
   4815     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
   4816     __ Ret();
   4817 
   4818     // Fall back to %AllocateInNewSpace (if not too big).
   4819     Label too_big_for_new_space;
   4820     __ Bind(&allocate);
   4821     __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
   4822     __ B(gt, &too_big_for_new_space);
   4823     {
   4824       FrameScope scope(masm, StackFrame::INTERNAL);
   4825       __ SmiTag(x0);
   4826       __ SmiTag(x6);
   4827       __ Push(x0, x2, x6);
   4828       __ CallRuntime(Runtime::kAllocateInNewSpace);
   4829       __ Mov(x3, x0);
   4830       __ Pop(x2, x0);
   4831       __ SmiUntag(x0);
   4832     }
   4833     __ B(&done_allocate);
   4834 
   4835     // Fall back to %NewRestParameter.
   4836     __ Bind(&too_big_for_new_space);
   4837     __ Push(x1);
   4838     __ TailCallRuntime(Runtime::kNewRestParameter);
   4839   }
   4840 }
   4841 
   4842 
   4843 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
   4844   // ----------- S t a t e -------------
   4845   //  -- x1 : function
   4846   //  -- cp : context
   4847   //  -- fp : frame pointer
   4848   //  -- lr : return address
   4849   // -----------------------------------
   4850   __ AssertFunction(x1);
   4851 
   4852   // Make x6 point to the JavaScript frame.
   4853   __ Mov(x6, fp);
   4854   if (skip_stub_frame()) {
   4855     // For Ignition we need to skip the handler/stub frame to reach the
   4856     // JavaScript frame for the function.
   4857     __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
   4858   }
   4859   if (FLAG_debug_code) {
   4860     Label ok;
   4861     __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
   4862     __ Cmp(x3, x1);
   4863     __ B(eq, &ok);
   4864     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
   4865     __ Bind(&ok);
   4866   }
   4867 
   4868   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   4869   __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   4870   __ Ldrsw(
   4871       x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
   4872   __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
   4873   __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
   4874   __ SmiTag(x2);
   4875 
   4876   // x1 : function
   4877   // x2 : number of parameters (tagged)
   4878   // x3 : parameters pointer
   4879   // x6 : JavaScript frame pointer
   4880   //
   4881   // Returns pointer to result object in x0.
   4882 
   4883   // Make an untagged copy of the parameter count.
   4884   // Note: arg_count_smi is an alias of param_count_smi.
   4885   Register function = x1;
   4886   Register arg_count_smi = x2;
   4887   Register param_count_smi = x2;
   4888   Register recv_arg = x3;
   4889   Register param_count = x7;
   4890   __ SmiUntag(param_count, param_count_smi);
   4891 
   4892   // Check if the calling frame is an arguments adaptor frame.
   4893   Register caller_fp = x11;
   4894   Register caller_ctx = x12;
   4895   Label runtime;
   4896   Label adaptor_frame, try_allocate;
   4897   __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
   4898   __ Ldr(
   4899       caller_ctx,
   4900       MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
   4901   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   4902   __ B(eq, &adaptor_frame);
   4903 
   4904   // No adaptor, parameter count = argument count.
   4905 
   4906   //   x1   function      function pointer
   4907   //   x2   arg_count_smi number of function arguments (smi)
   4908   //   x3   recv_arg      pointer to receiver arguments
   4909   //   x4   mapped_params number of mapped params, min(params, args) (uninit)
   4910   //   x7   param_count   number of function parameters
   4911   //   x11  caller_fp     caller's frame pointer
   4912   //   x14  arg_count     number of function arguments (uninit)
   4913 
   4914   Register arg_count = x14;
   4915   Register mapped_params = x4;
   4916   __ Mov(arg_count, param_count);
   4917   __ Mov(mapped_params, param_count);
   4918   __ B(&try_allocate);
   4919 
   4920   // We have an adaptor frame. Patch the parameters pointer.
   4921   __ Bind(&adaptor_frame);
   4922   __ Ldr(arg_count_smi,
   4923          MemOperand(caller_fp,
   4924                     ArgumentsAdaptorFrameConstants::kLengthOffset));
   4925   __ SmiUntag(arg_count, arg_count_smi);
   4926   __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
   4927   __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
   4928 
   4929   // Compute the mapped parameter count = min(param_count, arg_count)
   4930   __ Cmp(param_count, arg_count);
   4931   __ Csel(mapped_params, param_count, arg_count, lt);
   4932 
   4933   __ Bind(&try_allocate);
   4934 
   4935   //   x0   alloc_obj     pointer to allocated objects: param map, backing
   4936   //                      store, arguments (uninit)
   4937   //   x1   function      function pointer
   4938   //   x2   arg_count_smi number of function arguments (smi)
   4939   //   x3   recv_arg      pointer to receiver arguments
   4940   //   x4   mapped_params number of mapped parameters, min(params, args)
   4941   //   x7   param_count   number of function parameters
   4942   //   x10  size          size of objects to allocate (uninit)
   4943   //   x14  arg_count     number of function arguments
   4944 
   4945   // Compute the size of backing store, parameter map, and arguments object.
   4946   // 1. The parameter map has two extra words containing the context and
   4947   //    the backing store.
   4948   const int kParameterMapHeaderSize =
   4949       FixedArray::kHeaderSize + 2 * kPointerSize;
   4950 
   4951   // Calculate the parameter map size, assuming it exists.
   4952   Register size = x10;
   4953   __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
   4954   __ Add(size, size, kParameterMapHeaderSize);
   4955 
   4956   // If there are no mapped parameters, set the running size total to zero.
   4957   // Otherwise, use the parameter map size calculated earlier.
   4958   __ Cmp(mapped_params, 0);
   4959   __ CzeroX(size, eq);
   4960 
   4961   // 2. Add the size of the backing store and arguments object.
   4962   __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
   4963   __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
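          // With 8-byte pointers the total size is therefore:
          //   (mapped_params > 0 ? kParameterMapHeaderSize + mapped_params * 8
          //                      : 0)
          //   + FixedArray::kHeaderSize + arg_count * 8
          //   + JSSloppyArgumentsObject::kSize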
   4964 
   4965   // Do the allocation of all three objects in one go. Assign this to x0, as it
   4966   // will be returned to the caller.
   4967   Register alloc_obj = x0;
   4968   __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
   4969 
   4970   // Get the arguments boilerplate from the current (global) context.
   4971 
   4972   //   x0   alloc_obj       pointer to allocated objects (param map, backing
   4973   //                        store, arguments)
   4974   //   x1   function        function pointer
   4975   //   x2   arg_count_smi   number of function arguments (smi)
   4976   //   x3   recv_arg        pointer to receiver arguments
   4977   //   x4   mapped_params   number of mapped parameters, min(params, args)
   4978   //   x7   param_count     number of function parameters
   4979   //   x11  sloppy_args_map offset to args (or aliased args) map (uninit)
   4980   //   x14  arg_count       number of function arguments
   4981 
   4982   Register global_ctx = x10;
   4983   Register sloppy_args_map = x11;
   4984   Register aliased_args_map = x10;
   4985   __ Ldr(global_ctx, NativeContextMemOperand());
   4986 
   4987   __ Ldr(sloppy_args_map,
   4988          ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
   4989   __ Ldr(
   4990       aliased_args_map,
   4991       ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
   4992   __ Cmp(mapped_params, 0);
   4993   __ CmovX(sloppy_args_map, aliased_args_map, ne);
   4994 
   4995   // Copy the JS object part.
   4996   __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
   4997   __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
   4998   __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
   4999   __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
   5000 
   5001   // Set up the callee in-object property.
   5002   __ AssertNotSmi(function);
   5003   __ Str(function,
   5004          FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
   5005 
   5006   // Use the length and set that as an in-object property.
   5007   __ Str(arg_count_smi,
   5008          FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
   5009 
   5010   // Set up the elements pointer in the allocated arguments object.
   5011   // If we allocated a parameter map, "elements" will point there, otherwise
   5012   // it will point to the backing store.
   5013 
   5014   //   x0   alloc_obj     pointer to allocated objects (param map, backing
   5015   //                      store, arguments)
   5016   //   x1   function      function pointer
   5017   //   x2   arg_count_smi number of function arguments (smi)
   5018   //   x3   recv_arg      pointer to receiver arguments
   5019   //   x4   mapped_params number of mapped parameters, min(params, args)
   5020   //   x5   elements      pointer to parameter map or backing store (uninit)
   5021   //   x6   backing_store pointer to backing store (uninit)
   5022   //   x7   param_count   number of function parameters
   5023   //   x14  arg_count     number of function arguments
   5024 
   5025   Register elements = x5;
   5026   __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
   5027   __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
   5028 
   5029   // Initialize parameter map. If there are no mapped arguments, we're done.
   5030   Label skip_parameter_map;
   5031   __ Cmp(mapped_params, 0);
   5032   // Set up backing store address, because it is needed later for filling in
   5033   // the unmapped arguments.
   5034   Register backing_store = x6;
   5035   __ CmovX(backing_store, elements, eq);
   5036   __ B(eq, &skip_parameter_map);
   5037 
   5038   __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
   5039   __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
   5040   __ Add(x10, mapped_params, 2);
   5041   __ SmiTag(x10);
   5042   __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
   5043   __ Str(cp, FieldMemOperand(elements,
   5044                              FixedArray::kHeaderSize + 0 * kPointerSize));
   5045   __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
   5046   __ Add(x10, x10, kParameterMapHeaderSize);
   5047   __ Str(x10, FieldMemOperand(elements,
   5048                               FixedArray::kHeaderSize + 1 * kPointerSize));
   5049 
   5050   // Copy the parameter slots and the holes in the arguments.
   5051   // We need to fill in mapped_parameter_count slots. They index the context,
   5052   // where parameters are stored in reverse order, at:
   5053   //
   5054   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
   5055   //
   5056   // The mapped parameter thus needs to get indices:
   5057   //
   5058   //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
   5059   //     MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
   5060   //
   5061   // We loop from right to left.
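          // Worked example, assuming Context::MIN_CONTEXT_SLOTS == 4: with
          // param_count == 3 and mapped_params == 2, the loop below stores the
          // smi-tagged context indices 5 and then 6, i.e. indices
          // MIN_CONTEXT_SLOTS + 1 .. MIN_CONTEXT_SLOTS + 2.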
   5062 
   5063   //   x0   alloc_obj     pointer to allocated objects (param map, backing
   5064   //                      store, arguments)
   5065   //   x1   function      function pointer
   5066   //   x2   arg_count_smi number of function arguments (smi)
   5067   //   x3   recv_arg      pointer to receiver arguments
   5068   //   x4   mapped_params number of mapped parameters, min(params, args)
   5069   //   x5   elements      pointer to parameter map or backing store (uninit)
   5070   //   x6   backing_store pointer to backing store (uninit)
   5071   //   x7   param_count   number of function parameters
   5072   //   x11  loop_count    parameter loop counter (uninit)
   5073   //   x12  index         parameter index (smi, uninit)
   5074   //   x13  the_hole      hole value (uninit)
   5075   //   x14  arg_count     number of function arguments
   5076 
   5077   Register loop_count = x11;
   5078   Register index = x12;
   5079   Register the_hole = x13;
   5080   Label parameters_loop, parameters_test;
   5081   __ Mov(loop_count, mapped_params);
   5082   __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
   5083   __ Sub(index, index, mapped_params);
   5084   __ SmiTag(index);
   5085   __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
   5086   __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
   5087   __ Add(backing_store, backing_store, kParameterMapHeaderSize);
   5088 
   5089   __ B(&parameters_test);
   5090 
   5091   __ Bind(&parameters_loop);
   5092   __ Sub(loop_count, loop_count, 1);
   5093   __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
   5094   __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
   5095   __ Str(index, MemOperand(elements, x10));
   5096   __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
   5097   __ Str(the_hole, MemOperand(backing_store, x10));
   5098   __ Add(index, index, Smi::FromInt(1));
   5099   __ Bind(&parameters_test);
   5100   __ Cbnz(loop_count, &parameters_loop);
   5101 
   5102   __ Bind(&skip_parameter_map);
   5103   // Copy the arguments header and remaining slots (if there are any).
  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
  __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
  __ Str(arg_count_smi, FieldMemOperand(backing_store,
                                        FixedArray::kLengthOffset));

  //   x0   alloc_obj     pointer to allocated objects (param map, backing
  //                      store, arguments)
  //   x1   function      function pointer
  //   x2   arg_count_smi number of function arguments (smi)
  //   x3   recv_arg      pointer to receiver arguments
  //   x4   mapped_params number of mapped parameters, min(params, args)
  //   x6   backing_store pointer to backing store (uninit)
  //   x14  arg_count     number of function arguments

  Label arguments_loop, arguments_test;
  __ Mov(x10, mapped_params);
  __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
  __ B(&arguments_test);

  __ Bind(&arguments_loop);
  __ Sub(recv_arg, recv_arg, kPointerSize);
  __ Ldr(x11, MemOperand(recv_arg));
  __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
  __ Add(x10, x10, 1);

  __ Bind(&arguments_test);
  __ Cmp(x10, arg_count);
  __ B(lt, &arguments_loop);

  __ Ret();

  // Do the runtime call to allocate the arguments object.
  __ Bind(&runtime);
  __ Push(function, recv_arg, arg_count_smi);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}


void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x1 : function
  //  -- cp : context
  //  -- fp : frame pointer
  //  -- lr : return address
  // -----------------------------------
  __ AssertFunction(x1);

  // Make x2 point to the JavaScript frame.
  __ Mov(x2, fp);
  if (skip_stub_frame()) {
    // For Ignition we need to skip the handler/stub frame to reach the
    // JavaScript frame for the function.
    __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
  }
  if (FLAG_debug_code) {
    Label ok;
    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
    __ Cmp(x3, x1);
    __ B(eq, &ok);
    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
    __ Bind(&ok);
  }

  // Check if we have an arguments adaptor frame below the function frame.
  Label arguments_adaptor, arguments_done;
  __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ B(eq, &arguments_adaptor);
  {
    __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
    __ Ldrsw(x0, FieldMemOperand(
                     x4, SharedFunctionInfo::kFormalParameterCountOffset));
    __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
  }
  __ B(&arguments_done);
  __ Bind(&arguments_adaptor);
  {
    __ Ldrsw(x0, UntagSmiMemOperand(
                     x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
  }
  __ Bind(&arguments_done);

  // ----------- S t a t e -------------
  //  -- cp : context
  //  -- x0 : number of arguments
  //  -- x1 : function
  //  -- x2 : pointer to the first argument
  //  -- lr : return address
  // -----------------------------------

  // Allocate space for the strict arguments object plus the backing store.
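  // Illustrative example (assuming a 64-bit layout, so kPointerSize == 8 and
  // FixedArray::kHeaderSize == 16): for argc == 2 the allocation request is
  // 32 (JSStrictArgumentsObject::kSize, see the STATIC_ASSERT below) + 16 +
  // 2 * 8 = 64 bytes.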
  Label allocate, done_allocate;
  __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
  __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
  __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
  __ Bind(&done_allocate);

  // Compute arguments.length in x6.
  __ SmiTag(x6, x0);

  // Set up the elements array in x3.
  __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
  __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
  __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
  __ Add(x4, x3, FixedArray::kHeaderSize);
  {
    Label loop, done_loop;
    __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
    __ Bind(&loop);
    __ Cmp(x4, x0);
    __ B(eq, &done_loop);
    __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
    __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
    __ Sub(x2, x2, Operand(1 * kPointerSize));
    __ Add(x4, x4, Operand(1 * kPointerSize));
    __ B(&loop);
    __ Bind(&done_loop);
  }

  // Set up the strict arguments object in x0.
  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
  __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
  __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
  __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
  __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
  __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
  __ Ret();

  // Fall back to %AllocateInNewSpace (if not too big).
  Label too_big_for_new_space;
  __ Bind(&allocate);
  __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
  __ B(gt, &too_big_for_new_space);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(x0);
    __ SmiTag(x6);
    __ Push(x0, x2, x6);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ Mov(x3, x0);
    __ Pop(x2, x0);
    __ SmiUntag(x0);
  }
  __ B(&done_allocate);

  // Fall back to %NewStrictArguments.
  __ Bind(&too_big_for_new_space);
  __ Push(x1);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}


void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context = cp;
  Register value = x0;
  Register slot = x2;
  Register context_temp = x10;
  Register cell = x10;
  Register cell_details = x11;
  Register cell_value = x12;
  Register cell_value_map = x13;
  Register value_map = x14;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue);
  }

  // Go up the context chain to the script context.
  for (int i = 0; i < depth(); i++) {
    __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
    context = context_temp;
  }
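  // For example, depth() == 2 emits two dependent loads here, walking
  // cp->previous->previous and leaving the script context in x10.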

  // Load the PropertyCell at the specified slot.
  __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
  __ Ldr(cell, ContextMemOperand(cell));

  // Load the PropertyDetails for the cell (actually only the cell_type, kind
  // and READ_ONLY attribute bit).
  __ Ldr(cell_details,
         UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
  __ And(cell_details, cell_details,
         PropertyDetails::PropertyCellTypeField::kMask |
             PropertyDetails::KindField::kMask |
             PropertyDetails::kAttributesReadOnlyMask);
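  // Sketch of the comparison below (not emitted code): after the And above,
  //   cell_details == PropertyCellTypeField::encode(kMutable) |
  //                   KindField::encode(kData)
  // holds exactly for a mutable data cell whose READ_ONLY attribute bit is
  // clear, since that bit is part of the retained mask.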

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                           PropertyCellType::kMutable) |
                           PropertyDetails::KindField::encode(kData));
  __ B(ne, &not_mutable_data);
  __ JumpIfSmi(value, &fast_smi_case);
  __ Bind(&fast_heapobject_case);
  __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  // RecordWriteField clobbers the value register, so we copy it before the
  // call.
  __ Mov(x11, value);
  __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Ret();

  __ Bind(&not_mutable_data);
  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ Cmp(cell_value, value);
  __ B(ne, &not_same_value);

  // Make sure the PropertyCell is not marked READ_ONLY.
  __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
  __ B(ne, &slow_case);

  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                             PropertyCellType::kConstant) |
                             PropertyDetails::KindField::encode(kData));
    __ B(eq, &done);
    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                             PropertyCellType::kConstantType) |
                             PropertyDetails::KindField::encode(kData));
    __ B(eq, &done);
    __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                             PropertyCellType::kUndefined) |
                             PropertyDetails::KindField::encode(kData));
    __ Check(eq, kUnexpectedValue);
    __ Bind(&done);
  }
  __ Ret();
  __ Bind(&not_same_value);

  // Check if PropertyCell contains data with constant type (and is not
  // READ_ONLY).
  __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
                           PropertyCellType::kConstantType) |
                           PropertyDetails::KindField::encode(kData));
  __ B(ne, &slow_case);
  // Now either both the old and new values must be smis, or both must be
  // heap objects with the same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value, &value_is_heap_object);
  __ JumpIfNotSmi(cell_value, &slow_case);
  // Old and new values are smis, no need for a write barrier here.
  __ Bind(&fast_smi_case);
  __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ Ret();

  __ Bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value, &slow_case);

  __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
  __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
  __ Cmp(cell_value_map, value_map);
  __ B(eq, &fast_heapobject_case);

  // Fall back to the runtime.
  __ Bind(&slow_case);
  __ SmiTag(slot);
  __ Push(slot, value);
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy);
}


// The number of registers that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers needs to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
static const int kCallApiFunctionSpillSpace = 4;


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return static_cast<int>(ref0.address() - ref1.address());
}


// Calls an API function. Allocates a HandleScope, extracts the returned value
// from the handle, and propagates exceptions.
// 'stack_space' is the space to be unwound on exit (includes the JS call
// arguments' space and the additional space allocated for the fast call).
// 'spill_offset' is the offset from the stack pointer where
// CallApiFunctionAndReturn can spill registers.
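// In outline, the emitted code performs the following (a C-like sketch,
// assuming the HandleScopeData layout implied by the offsets computed below):
//   prev_next = data->next; prev_limit = data->limit; data->level++;
//   result = callback(...);
//   data->next = prev_next; data->level--;
//   if (data->limit != prev_limit) DeleteExtensions();
//   if (scheduled_exception != the_hole) PromoteScheduledException();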
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space,
    MemOperand* stack_space_operand, int spill_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  ASM_LOCATION("CallApiFunctionAndReturn");
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(x1) || function_address.is(x2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ Mov(x10, ExternalReference::is_profiling_address(isolate));
  __ Ldrb(w10, MemOperand(x10));
  __ Cbz(w10, &profiler_disabled);
  __ Mov(x3, thunk_ref);
  __ B(&end_profiler_check);

  __ Bind(&profiler_disabled);
  __ Mov(x3, function_address);
  __ Bind(&end_profiler_check);
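  // At this point x3 holds the call target: the profiler thunk when profiling
  // is enabled, and the C callback itself otherwise.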

  // Save the callee-save registers we are going to use.
  // TODO(all): Is this necessary? ARM doesn't do it.
  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
  __ Poke(x19, (spill_offset + 0) * kXRegSize);
  __ Poke(x20, (spill_offset + 1) * kXRegSize);
  __ Poke(x21, (spill_offset + 2) * kXRegSize);
  __ Poke(x22, (spill_offset + 3) * kXRegSize);

  // Allocate HandleScope in callee-save registers.
  // We will need to restore the HandleScope after the call to the API
  // function; by allocating it in callee-save registers it will be preserved
  // by the C code.
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  __ Mov(handle_scope_base, next_address);
  __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  __ Add(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ Mov(x0, ExternalReference::isolate_address(isolate));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ Mov(x0, ExternalReference::isolate_address(isolate));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ Ldr(x0, return_value_operand);
  __ Bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (__ emit_debug_code()) {
    __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    __ Cmp(w1, level_reg);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ Sub(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  __ Cmp(limit_reg, x1);
  __ B(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ Bind(&leave_exit_frame);
  // Restore callee-saved registers.
  __ Peek(x19, (spill_offset + 0) * kXRegSize);
  __ Peek(x20, (spill_offset + 1) * kXRegSize);
  __ Peek(x21, (spill_offset + 2) * kXRegSize);
  __ Peek(x22, (spill_offset + 3) * kXRegSize);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ Ldr(cp, *context_restore_operand);
  }

  if (stack_space_operand != NULL) {
    __ Ldr(w2, *stack_space_operand);
  }

  __ LeaveExitFrame(false, x1, !restore_context);

  // Check if the function scheduled an exception.
  __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
  __ Ldr(x5, MemOperand(x5));
  __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
                   &promote_scheduled_exception);

  if (stack_space_operand != NULL) {
    __ Drop(x2, 1);
  } else {
    __ Drop(stack_space);
  }
  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ Bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ Bind(&delete_allocated_handles);
  __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  // Save the return value in a callee-save register.
  Register saved_result = x19;
  __ Mov(saved_result, x0);
  __ Mov(x0, ExternalReference::isolate_address(isolate));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ Mov(x0, saved_result);
  __ B(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0                  : callee
  //  -- x4                  : call_data
  //  -- x2                  : holder
  //  -- x1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8]  : first argument
  //  -- sp[argc * 8]        : receiver
  // -----------------------------------

  Register callee = x0;
  Register call_data = x4;
  Register holder = x2;
  Register api_function_address = x1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);

  // FunctionCallbackArguments

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context, callee and call data.
  __ Push(context, callee, call_data);

  if (!is_lazy()) {
    // Load context from callee
    __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  if (!call_data_undefined()) {
    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
  }
  Register isolate_reg = x5;
  __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));

  // FunctionCallbackArguments:
  //    return value, return value default, isolate, holder.
  __ Push(call_data, call_data, isolate_reg, holder);
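  // After the pushes above the implicit args array is laid out as follows
  // (a sketch; the indices match the STATIC_ASSERTs):
  //   sp[0] : holder                  (kHolderIndex == 0)
  //   sp[1] : isolate                 (kIsolateIndex == 1)
  //   sp[2] : return value default    (kReturnValueDefaultValueIndex == 2)
  //   sp[3] : return value            (kReturnValueOffset == 3)
  //   sp[4] : call data               (kDataIndex == 4)
  //   sp[5] : callee                  (kCalleeIndex == 5)
  //   sp[6] : saved context           (kContextSaveIndex == 6)
  //   sp[7] : new target (undefined)  (kNewTargetIndex == 7)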

  // Prepare arguments.
  Register args = x6;
  __ Mov(args, masm->StackPointer());

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by the GC.
  const int kApiStackSpace = 3;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  DCHECK(!AreAliased(x0, api_function_address));
  // x0 = FunctionCallbackInfo&
  // The arguments are located after the return address.
  __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
  // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
  __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ Mov(x10, argc());
  __ Str(x10, MemOperand(x0, 2 * kPointerSize));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
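  // The '2 +' in these offsets skips the saved frame pointer and return
  // address that sit between fp and the pushed implicit args (compare the
  // '+3' comment in CallApiGetterStub below, which additionally skips the
  // name handle).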
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  MemOperand length_operand =
      MemOperand(masm->StackPointer(), 3 * kPointerSize);
  MemOperand* stack_space_operand = &length_operand;
  stack_space = argc() + FCA::kArgsLength + 1;
  stack_space_operand = NULL;
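  // Note: stack_space_operand is reset to NULL above, so the constant
  // stack_space (argc plus the implicit args and the receiver) is used for
  // the unwind, and length_operand is effectively unused on this path.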

  const int spill_offset = 1 + kApiStackSpace;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, spill_offset,
                           return_value_operand, &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
  // name below the exit frame to make GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = x4;
  Register scratch2 = x5;
  Register scratch3 = x6;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  __ Push(receiver);

  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
  __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ Push(scratch3, scratch, scratch, scratch2, holder);
  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ Push(scratch);
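  // Stack layout after the pushes above (a sketch; the args_ indices match
  // the STATIC_ASSERTs):
  //   sp[0] : name handle
  //   sp[1] : should_throw_on_error (Smi 0)   -> args_[0]
  //   sp[2] : holder                          -> args_[1]
  //   sp[3] : isolate                         -> args_[2]
  //   sp[4] : return value default (undef)    -> args_[3]
  //   sp[5] : return value (undef)            -> args_[4]
  //   sp[6] : data                            -> args_[5]
  //   sp[7] : receiver                        -> args_[6]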

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
  __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
  __ Add(x1, x0, 1 * kPointerSize);  // x1 = v8::PCI::args_

  const int kApiStackSpace = 1;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  // Create the v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ Poke(x1, 1 * kPointerSize);
  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
  // x1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  Register api_function_address = x2;
  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ Ldr(api_function_address,
         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  const int spill_offset = 1 + kApiStackSpace;
  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, spill_offset,
                           return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64