// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/regexp-macro-assembler.h"
#include "src/stub-cache.h"

namespace v8 {
namespace internal {


void FastNewClosureStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: function info
  static Register registers[] = { x2 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
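// The descriptors below mostly follow the pattern above: list the parameter
// registers, derive the count with the sizeof(array) / sizeof(array[0])
// idiom, and point deoptimization_handler_ at a runtime entry, an IC miss
// handler, or NULL. GenerateLightweightMiss() further down shows one
// consumer: it pushes register_params_[0 .. register_param_count_) and then
// calls the miss handler.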


void FastNewContextStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: function
  static Register registers[] = { x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void ToNumberStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void NumberToStringStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x3: array literals array
  // x2: array literal index
  // x1: constant elements
  static Register registers[] = { x3, x2, x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  static Representation representations[] = {
    Representation::Tagged(),
    Representation::Smi(),
    Representation::Tagged() };
  descriptor->register_param_representations_ = representations;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(
          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x3: object literals array
  // x2: object literal index
  // x1: constant properties
  // x0: object literal flags
  static Register registers[] = { x3, x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: feedback vector
  // x3: call feedback slot
  static Register registers[] = { x2, x3 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
}


void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  // x0: key
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  // x0: key
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void RegExpConstructResultStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: length
  // x1: index (of last match)
  // x0: string
  static Register registers[] = { x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}


void LoadFieldStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: receiver
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  static Register registers[] = { x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void StringLengthStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { x0, x2 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStringLengthStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: receiver
  // x1: key
  // x0: value
  static Register registers[] = { x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value (js_array)
  // x1: to_map
  static Register registers[] = { x0, x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value to compare
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}


static void InitializeArrayConstructorDescriptor(
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // x1: function
  // x2: allocation site with elements kind
  // x0: number of arguments to the constructor function
  static Register registers_variable_args[] = { x1, x2, x0 };
  static Register registers_no_args[] = { x1, x2 };

  if (constant_stack_parameter_count == 0) {
    descriptor->register_param_count_ =
        sizeof(registers_no_args) / sizeof(registers_no_args[0]);
    descriptor->register_params_ = registers_no_args;
  } else {
    // A stack parameter count is needed (constructor pointer plus arguments).
    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
    descriptor->stack_parameter_count_ = x0;
    descriptor->register_param_count_ =
        sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
    descriptor->register_params_ = registers_variable_args;
    static Representation representations[] = {
        Representation::Tagged(),
        Representation::Tagged(),
        Representation::Integer32() };
    descriptor->register_param_representations_ = representations;
  }

  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
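// The three Array constructor stubs below share this helper:
// constant_stack_parameter_count is 0 for the no-argument stub, 1 for the
// single-argument stub, and -1 when the argument count is only known at
// runtime, in which case x0 carries it (see the register comments above).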


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(descriptor, -1);
}


static void InitializeInternalArrayConstructorDescriptor(
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // x1: constructor function
  // x0: number of arguments to the constructor function
  static Register registers_variable_args[] = { x1, x0 };
  static Register registers_no_args[] = { x1 };

  if (constant_stack_parameter_count == 0) {
    descriptor->register_param_count_ =
        sizeof(registers_no_args) / sizeof(registers_no_args[0]);
    descriptor->register_params_ = registers_no_args;
  } else {
    // A stack parameter count is needed (constructor pointer plus arguments).
    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
    descriptor->stack_parameter_count_ = x0;
    descriptor->register_param_count_ =
        sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
    descriptor->register_params_ = registers_variable_args;
    static Representation representations[] = {
        Representation::Tagged(),
        Representation::Integer32() };
    descriptor->register_param_representations_ = representations;
  }

  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}


void StoreGlobalStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  // x2: key (unused)
  // x0: value
  static Register registers[] = { x1, x2, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}


void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  // x3: target map
  // x1: key
  // x2: receiver
  static Register registers[] = { x0, x3, x1, x2 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}


void BinaryOpICStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: left operand
  // x0: right operand
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
}


void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: allocation site
  // x1: left operand
  // x0: right operand
  static Register registers[] = { x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
}


void StringAddStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: left operand
  // x0: right operand
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}


void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
  static PlatformCallInterfaceDescriptor default_descriptor =
      PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

  static PlatformCallInterfaceDescriptor noInlineDescriptor =
      PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);

  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
    static Register registers[] = { x1,  // JSFunction
                                    cp,  // context
                                    x0,  // actual number of arguments
                                    x2,  // expected number of arguments
    };
    static Representation representations[] = {
        Representation::Tagged(),     // JSFunction
        Representation::Tagged(),     // context
        Representation::Integer32(),  // actual number of arguments
        Representation::Integer32(),  // expected number of arguments
    };
    descriptor->register_param_count_ = 4;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &default_descriptor;
  }
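  // In the descriptors here, cp is V8's alias for the dedicated context
  // register on ARM64 (x27 in this port); it is passed alongside the
  // numbered argument registers like any other parameter.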
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::KeyedCall);
    static Register registers[] = { cp,  // context
                                    x2,  // key
    };
    static Representation representations[] = {
        Representation::Tagged(),     // context
        Representation::Tagged(),     // key
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::NamedCall);
    static Register registers[] = { cp,  // context
                                    x2,  // name
    };
    static Representation representations[] = {
        Representation::Tagged(),     // context
        Representation::Tagged(),     // name
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::CallHandler);
    static Register registers[] = { cp,  // context
                                    x0,  // receiver
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // receiver
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &default_descriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ApiFunctionCall);
    static Register registers[] = { x0,  // callee
                                    x4,  // call_data
                                    x2,  // holder
                                    x1,  // api_function_address
                                    cp,  // context
    };
    static Representation representations[] = {
        Representation::Tagged(),    // callee
        Representation::Tagged(),    // call_data
        Representation::Tagged(),    // holder
        Representation::External(),  // api_function_address
        Representation::Tagged(),    // context
    };
    descriptor->register_param_count_ = 5;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &default_descriptor;
  }
}


#define __ ACCESS_MASM(masm)


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT((descriptor->register_param_count_ == 0) ||
           x0.Is(descriptor->register_params_[param_count - 1]));

    // Push arguments
    MacroAssembler::PushPopQueue queue(masm);
    for (int i = 0; i < param_count; ++i) {
      queue.Queue(descriptor->register_params_[i]);
    }
    queue.PushQueued();
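    // The PushPopQueue batches the parameter registers so the MacroAssembler
    // can emit them as paired stores and keep the stack pointer aligned,
    // rather than materializing one push at a time. (A summary of the
    // helper's intent; see its definition for the exact codegen.)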

    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label done;
  Register input = source();
  Register result = destination();
  ASSERT(is_truncating());

  ASSERT(result.Is64Bits());
  ASSERT(jssp.Is(masm->StackPointer()));

  int double_offset = offset();

  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
  Register scratch2 =
      GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);

  __ Push(scratch1, scratch2);
  // Account for saved regs if input is jssp.
  if (input.is(jssp)) double_offset += 2 * kPointerSize;

  if (!skip_fastpath()) {
    __ Push(double_scratch);
    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
    __ Ldr(double_scratch, MemOperand(input, double_offset));
    // Try to convert with an FPU convert instruction. This handles all
    // non-saturating cases.
    __ TryConvertDoubleToInt64(result, double_scratch, &done);
    __ Fmov(result, double_scratch);
  } else {
    __ Ldr(result, MemOperand(input, double_offset));
  }

  // If we reach here we need to manually convert the input to an int32.

  // Extract the exponent.
  Register exponent = scratch1;
  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
          HeapNumber::kExponentBits);
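  // For reference, the extraction above relies on the IEEE 754 double layout:
  // bit 63 is the sign, bits 62-52 are the biased exponent
  // (kExponentBits == 11, bias 1023) and bits 51-0 are the mantissa
  // (kMantissaBits == 52), with an implicit leading 1 for normal numbers.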

  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0
  // since the mantissa gets shifted completely out of the int32_t result.
  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
  __ CzeroX(result, ge);
  __ B(ge, &done);

  // The Fcvtzs sequence handles all cases except where the conversion causes
  // signed overflow in the int64_t target. Since we've already handled
  // exponents >= 84, we can guarantee that 63 <= exponent < 84.

  if (masm->emit_debug_code()) {
    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
    // Exponents less than this should have been handled by the Fcvt case.
    __ Check(ge, kUnexpectedValue);
  }

  // Isolate the mantissa bits, and set the implicit '1'.
  Register mantissa = scratch2;
  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);

  // Negate the mantissa if necessary.
  __ Tst(result, kXSignMask);
  __ Cneg(mantissa, mantissa, ne);

  // Shift the mantissa bits in the correct place. We know that we have to shift
  // it left here, because exponent >= 63 >= kMantissaBits.
  __ Sub(exponent, exponent,
         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
  __ Lsl(result, mantissa, exponent);
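  // Hand-checked example (not part of the code): for the input 2^80 the
  // biased exponent field is 1023 + 80 = 1103, so the shift above is
  // 1103 - 1023 - 52 = 28. The implicit-1 mantissa bit (bit 52) lands at
  // bit 80, well above bit 31, so the low 32 bits are 0, which is the
  // correct truncated int32 result.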

  __ Bind(&done);
  if (!skip_fastpath()) {
    __ Pop(double_scratch);
  }
  __ Pop(scratch2, scratch1);
  __ Ret();
}


// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Register left,
                                          Register right,
                                          Register scratch,
                                          FPRegister double_scratch,
                                          Label* slow,
                                          Condition cond) {
  ASSERT(!AreAliased(left, right, scratch));
  Label not_identical, return_equal, heap_number;
  Register result = x0;

  __ Cmp(right, left);
  __ B(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are equal and not both smis; since they are the same object,
  // neither of them is a smi. If the object is not a heap number, return
  // equal.
  if ((cond == lt) || (cond == gt)) {
    __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
                        ge);
  } else {
    Register right_type = scratch;
    __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
                        &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
      __ B(ge, slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if ((cond == le) || (cond == ge)) {
        __ Cmp(right_type, ODDBALL_TYPE);
        __ B(ne, &return_equal);
        __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ Mov(result, GREATER);
        } else {
          // undefined >= undefined should fail.
          __ Mov(result, LESS);
        }
        __ Ret();
      }
    }
  }

  __ Bind(&return_equal);
  if (cond == lt) {
    __ Mov(result, GREATER);  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ Mov(result, LESS);     // Things aren't greater than themselves.
  } else {
    __ Mov(result, EQUAL);    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // Cases lt and gt have been handled earlier, and case ne is never seen, as
  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
  // only concerned with cases ge, le and eq here.
  if ((cond != lt) && (cond != gt)) {
    ASSERT((cond == ge) || (cond == le) || (cond == eq));
    __ Bind(&heap_number);
    // Left and right are identical pointers to a heap number object. Return
    // non-equal if the heap number is a NaN, and equal otherwise. Comparing
    // the number to itself will set the overflow flag iff the number is NaN.
    __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
    __ Fcmp(double_scratch, double_scratch);
    __ B(vc, &return_equal);  // Not NaN, so treat as normal heap number.

    if (cond == le) {
      __ Mov(result, GREATER);
    } else {
      __ Mov(result, LESS);
    }
    __ Ret();
  }

  // No fall through here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&not_identical);
}


// See call site for description.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register left,
                                           Register right,
                                           Register left_type,
                                           Register right_type,
                                           Register scratch) {
  ASSERT(!AreAliased(left, right, left_type, right_type, scratch));

  if (masm->emit_debug_code()) {
    // We assume that the arguments are not identical.
    __ Cmp(left, right);
    __ Assert(ne, kExpectedNonIdenticalObjects);
  }

  // If either operand is a JS object or an oddball value, then they are not
  // equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label right_non_object;

  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
  __ B(lt, &right_non_object);

  // Return non-zero - x0 already contains a non-zero pointer.
  ASSERT(left.is(x0) || right.is(x0));
  Label return_not_equal;
  __ Bind(&return_not_equal);
  __ Ret();

  __ Bind(&right_non_object);

  // Check for oddballs: true, false, null, undefined.
  __ Cmp(right_type, ODDBALL_TYPE);

  // If right is not ODDBALL, test left. Otherwise, set eq condition.
  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);

  // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
  // Otherwise, right or left is ODDBALL, so set a ge condition.
  __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);

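  // Net effect of the Cmp/Ccmp chain: the branch below is taken when either
  // operand is an oddball, or when left is a JS object (right was already
  // ruled out as a JS object above).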
  __ B(ge, &return_not_equal);

  // Internalized strings are unique, so they can only be equal if they are the
  // same object. We have already tested that case, so if left and right are
  // both internalized strings, they cannot be equal.
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  __ Orr(scratch, left_type, right_type);
  __ TestAndBranchIfAllClear(
      scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
}


// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register left,
                                    Register right,
                                    FPRegister left_d,
                                    FPRegister right_d,
                                    Register scratch,
                                    Label* slow,
                                    bool strict) {
  ASSERT(!AreAliased(left, right, scratch));
  ASSERT(!AreAliased(left_d, right_d));
  ASSERT((left.is(x0) && right.is(x1)) ||
         (right.is(x0) && left.is(x1)));
  Register result = x0;

  Label right_is_smi, done;
  __ JumpIfSmi(right, &right_is_smi);

  // Left is the smi. Check whether right is a heap number.
  if (strict) {
    // If right is not a number and left is a smi, then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
                        &is_heap_number);
    // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!right.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
  }

  // Left is the smi. Right is a heap number. Load right value into right_d, and
  // convert left smi into double in left_d.
  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(left_d, left);
  __ B(&done);

  __ Bind(&right_is_smi);
  // Right is a smi. Check whether the non-smi left is a heap number.
  if (strict) {
    // If left is not a number and right is a smi then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
                        &is_heap_number);
    // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!left.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
  }

  // Right is the smi. Left is a heap number. Load left value into left_d, and
  // convert right smi into double in right_d.
  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(right_d, right);

  // Fall through to both_loaded_as_doubles.
  __ Bind(&done);
}


// Fast negative check for internalized-to-internalized equality.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register left,
                                                     Register right,
                                                     Register left_map,
                                                     Register right_map,
                                                     Register left_type,
                                                     Register right_type,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
  Register result = x0;

  Label object_test;
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // TODO(all): reexamine this branch sequence for optimisation wrt branch
  // prediction.
  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);

  // Both are internalized. We already checked that they weren't the same
  // pointer, so they are not equal.
  __ Mov(result, NOT_EQUAL);
  __ Ret();

  __ Bind(&object_test);

  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);

  // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
  // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
  __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);

  __ B(lt, not_both_strings);

  // If both objects are undetectable, they are equal. Otherwise, they are not
  // equal, since they are different objects and an object is not equal to
  // undefined.

  // Returning here, so we can corrupt right_type and left_type.
  Register right_bitfield = right_type;
  Register left_bitfield = left_type;
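  // The And/And/Eor sequence below computes
  //   ((right_bitfield & left_bitfield) & (1 << kIsUndetectable))
  //       ^ (1 << kIsUndetectable)
  // which is 0 (EQUAL) exactly when both maps are undetectable, and non-zero
  // (NOT_EQUAL) otherwise.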
  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
  __ And(result, right_bitfield, left_bitfield);
  __ And(result, result, 1 << Map::kIsUndetectable);
  __ Eor(result, result, 1 << Map::kIsUndetectable);
  __ Ret();
}


static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                         Register input,
                                         Register scratch,
                                         CompareIC::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ Bind(&ok);
}


void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = x1;
  Register rhs = x0;
  Register result = x0;
  Condition cond = GetCondition();

  Label miss;
  ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
  ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;
  Label not_two_smis, smi_done;
  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
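  // Both operands are smis: their untagged difference is negative, zero or
  // positive exactly as lhs is less than, equal to or greater than rhs, which
  // matches the LESS/EQUAL/GREATER result convention used throughout.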
  __ SmiUntag(lhs);
  __ Sub(result, lhs, Operand::UntagSmi(rhs));
  __ Ret();

  __ Bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so it is
  // certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);

  // If either is a smi (we know that at least one is not a smi), then they can
  // only be strictly equal if the other is a HeapNumber.
  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);

  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
  // can:
  //  1) Return the answer.
  //  2) Branch to the slow case.
  //  3) Fall through to both_loaded_as_doubles.
  // In case 3, we have found out that we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded, right into
  // rhs_d, left into lhs_d.
  FPRegister rhs_d = d0;
  FPRegister lhs_d = d1;
  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());

  __ Bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in rhs_d and
  // lhs_d.
  Label nan;
  __ Fcmp(lhs_d, rhs_d);
  __ B(vs, &nan);  // Overflow flag set if either is NaN.
  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
  __ Ret();

  __ Bind(&nan);
  // Left and/or right is a NaN. Load the result register with whatever makes
  // the comparison fail, since comparisons with NaN always fail (except ne,
  // which is filtered out at a higher level.)
  ASSERT(cond != ne);
  if ((cond == lt) || (cond == le)) {
    __ Mov(result, GREATER);
  } else {
    __ Mov(result, LESS);
  }
  __ Ret();

  __ Bind(&not_smis);
  // At this point we know we are dealing with two different objects, and
  // neither of them is a smi. The objects are in rhs_ and lhs_.

  // Load the maps and types of the objects.
  Register rhs_map = x10;
  Register rhs_type = x11;
  Register lhs_map = x12;
  Register lhs_type = x13;
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));

  if (strict()) {
    // This emits a non-equal return sequence for some object types, or falls
    // through if it was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap number comparison. Branch to earlier double comparison code
  // if they are heap numbers, otherwise, branch to internalized string check.
  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
  __ B(ne, &check_for_internalized_strings);
  __ Cmp(lhs_map, rhs_map);

  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
  // string check.
  __ B(ne, &flat_string_check);

  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
  // comparison code.
  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ B(&both_loaded_as_doubles);

  __ Bind(&check_for_internalized_strings);
  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
  // of internalized strings.
  if ((cond == eq) && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise branches to the string case or not both strings case.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
                                             lhs_type, rhs_type,
                                             &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ Bind(&flat_string_check);
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
                                                  x15, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
                      x11);
  if (cond == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
                                                     x10, x11, x12);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
                                                       x10, x11, x12, x13);
  }

  // Never fall through to here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and set up the arguments.
  Builtins::JavaScript native;
  if (cond == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result
    if ((cond == lt) || (cond == le)) {
      ncr = GREATER;
    } else {
      ASSERT((cond == gt) || (cond == ge));  // remaining cases
      ncr = LESS;
    }
    __ Mov(x10, Smi::FromInt(ncr));
    __ Push(x10);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  CPURegList saved_regs = kCallerSaved;
  CPURegList saved_fp_regs = kCallerSavedFP;

  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.

  // We don't care if MacroAssembler scratch registers are corrupted.
  saved_regs.Remove(*(masm->TmpList()));
  saved_fp_regs.Remove(*(masm->FPTmpList()));

  __ PushCPURegList(saved_regs);
  if (save_doubles_ == kSaveFPRegs) {
    __ PushCPURegList(saved_fp_regs);
  }

  AllowExternalCallThatCantCauseGC scope(masm);
  __ Mov(x0, ExternalReference::isolate_address(isolate()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);

  if (save_doubles_ == kSaveFPRegs) {
    __ PopCPURegList(saved_fp_regs);
  }
  __ PopCPURegList(saved_regs);
  __ Ret();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
  Register return_address = temps.AcquireX();
  __ Mov(return_address, lr);
  // Restore lr with the value it had before the call to this stub (the value
  // which must be pushed).
  __ Mov(lr, saved_lr);
  if (save_doubles_ == kSaveFPRegs) {
    __ PushSafepointRegistersAndDoubles();
  } else {
    __ PushSafepointRegisters();
  }
  __ Ret(return_address);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  UseScratchRegisterScope temps(masm);
  Register return_address = temps.AcquireX();
  // Preserve the return address (lr will be clobbered by the pop).
  __ Mov(return_address, lr);
  if (save_doubles_ == kSaveFPRegs) {
    __ PopSafepointRegistersAndDoubles();
  } else {
    __ PopSafepointRegisters();
  }
  __ Ret(return_address);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  // Stack on entry:
  // jssp[0]: Exponent (as a tagged value).
  // jssp[1]: Base (as a tagged value).
  //
  // The (tagged) result will be returned in x0, as a heap number.

  Register result_tagged = x0;
  Register base_tagged = x10;
  Register exponent_tagged = x11;
  Register exponent_integer = x12;
  Register scratch1 = x14;
  Register scratch0 = x15;
  Register saved_lr = x19;
  FPRegister result_double = d0;
  FPRegister base_double = d0;
  FPRegister exponent_double = d1;
  FPRegister base_double_copy = d2;
  FPRegister scratch1_double = d6;
  FPRegister scratch0_double = d7;

  // A fast-path for integer exponents.
  Label exponent_is_smi, exponent_is_integer;
  // Bail out to runtime.
  Label call_runtime;
  // Allocate a heap number for the result, and return it.
  Label done;

  // Unpack the inputs.
  if (exponent_type_ == ON_STACK) {
    Label base_is_smi;
    Label unpack_exponent;

    __ Pop(exponent_tagged, base_tagged);

    __ JumpIfSmi(base_tagged, &base_is_smi);
    __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
    // base_tagged is a heap number, so load its double value.
    __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
    __ B(&unpack_exponent);
    __ Bind(&base_is_smi);
    // base_tagged is a SMI, so untag it and convert it to a double.
    __ SmiUntagToDouble(base_double, base_tagged);

    __ Bind(&unpack_exponent);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
    //  d0    base_double       The base as a double.
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
    // exponent_tagged is a heap number, so load its double value.
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  }

  // Handle double (heap number) exponents.
  if (exponent_type_ != INTEGER) {
    // Detect integer exponents stored as doubles and handle those in the
    // integer fast-path.
    __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
                                 scratch0_double, &exponent_is_integer);

    if (exponent_type_ == ON_STACK) {
      FPRegister half_double = d3;
      FPRegister minus_half_double = d4;
      // Detect square root case. Crankshaft detects constant +/-0.5 at compile
      // time and uses DoMathPowHalf instead. We then skip this check for
      // non-constant cases of +/-0.5 as these hardly occur.

      __ Fmov(minus_half_double, -0.5);
      __ Fmov(half_double, 0.5);
      __ Fcmp(minus_half_double, exponent_double);
      __ Fccmp(half_double, exponent_double, NZFlag, ne);
      // Condition flags at this point:
      //    0.5:  nZCv    // Identified by eq && pl
      //   -0.5:  NZcv    // Identified by eq && mi
      //  other:  ?z??    // Identified by ne
      __ B(ne, &call_runtime);

      // The exponent is 0.5 or -0.5.

      // Given that exponent is known to be either 0.5 or -0.5, the following
      // special cases could apply (according to ECMA-262 15.8.2.13):
      //
      //  base.isNaN():                   The result is NaN.
      //  (base == +INFINITY) || (base == -INFINITY)
      //    exponent == 0.5:              The result is +INFINITY.
      //    exponent == -0.5:             The result is +0.
      //  (base == +0) || (base == -0)
      //    exponent == 0.5:              The result is +0.
      //    exponent == -0.5:             The result is +INFINITY.
      //  (base < 0) && base.isFinite():  The result is NaN.
      //
      // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
      // where base is -INFINITY or -0.

      // Add +0 to base. This has no effect other than turning -0 into +0.
      __ Fadd(base_double, base_double, fp_zero);
      // The operation -0+0 results in +0 in all cases except where the
      // FPCR rounding mode is 'round towards minus infinity' (RM). The
      // ARM64 simulator does not currently simulate FPCR (where the rounding
      // mode is set), so test the operation with some debug code.
      if (masm->emit_debug_code()) {
        UseScratchRegisterScope temps(masm);
        Register temp = temps.AcquireX();
        __ Fneg(scratch0_double, fp_zero);
        // Verify that we correctly generated +0.0 and -0.0.
        //  bits(+0.0) = 0x0000000000000000
        //  bits(-0.0) = 0x8000000000000000
        __ Fmov(temp, fp_zero);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
        __ Fmov(temp, scratch0_double);
        __ Eor(temp, temp, kDSignMask);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
        // Check that -0.0 + 0.0 == +0.0.
        __ Fadd(scratch0_double, scratch0_double, fp_zero);
        __ Fmov(temp, scratch0_double);
        __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
      }

      // If base is -INFINITY, make it +INFINITY.
      //  * Calculate base - base: All infinities will become NaNs since both
      //    -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
      //  * If the result is NaN, calculate abs(base).
      __ Fsub(scratch0_double, base_double, base_double);
      __ Fcmp(scratch0_double, 0.0);
      __ Fabs(scratch1_double, base_double);
      __ Fcsel(base_double, scratch1_double, base_double, vs);

      // Calculate the square root of base.
      __ Fsqrt(result_double, base_double);
      __ Fcmp(exponent_double, 0.0);
      __ B(ge, &done);  // Finish now for exponents of 0.5.
      // Find the inverse for exponents of -0.5.
      __ Fmov(scratch0_double, 1.0);
      __ Fdiv(result_double, scratch0_double, result_double);
      __ B(&done);
    }

    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ Mov(saved_lr, lr);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
      __ Mov(lr, saved_lr);
      __ B(&done);
    }

    // Handle SMI exponents.
    __ Bind(&exponent_is_smi);
    //  x10   base_tagged       The tagged base (input).
    //  x11   exponent_tagged   The tagged exponent (input).
   1294     //  d1    base_double       The base as a double.
   1295     __ SmiUntag(exponent_integer, exponent_tagged);
   1296   }
   1297 
   1298   __ Bind(&exponent_is_integer);
   1299   //  x10   base_tagged       The tagged base (input).
   1300   //  x11   exponent_tagged   The tagged exponent (input).
   1301   //  x12   exponent_integer  The exponent as an integer.
   1302   //  d1    base_double       The base as a double.
   1303 
   1304   // Find abs(exponent). For negative exponents, we can find the inverse later.
   1305   Register exponent_abs = x13;
   1306   __ Cmp(exponent_integer, 0);
   1307   __ Cneg(exponent_abs, exponent_integer, mi);
   1308   //  x13   exponent_abs      The value of abs(exponent_integer).
   1309 
   1310   // Repeatedly multiply to calculate the power.
   1311   //  result = 1.0;
   1312   //  For each bit n (exponent_integer{n}) {
   1313   //    if (exponent_integer{n}) {
   1314   //      result *= base;
   1315   //    }
   1316   //    base *= base;
   1317   //    if (remaining bits in exponent_integer are all zero) {
   1318   //      break;
   1319   //    }
   1320   //  }
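           // For example, with exponent_abs == 5 (binary 101), the loop squares
           // scratch1_double once per exponent bit (base, base^2, base^4) and
           // multiplies base and base^4 into the result, giving base^5.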
   1321   Label power_loop, power_loop_entry, power_loop_exit;
   1322   __ Fmov(scratch1_double, base_double);
   1323   __ Fmov(base_double_copy, base_double);
   1324   __ Fmov(result_double, 1.0);
   1325   __ B(&power_loop_entry);
   1326 
   1327   __ Bind(&power_loop);
   1328   __ Fmul(scratch1_double, scratch1_double, scratch1_double);
   1329   __ Lsr(exponent_abs, exponent_abs, 1);
   1330   __ Cbz(exponent_abs, &power_loop_exit);
   1331 
   1332   __ Bind(&power_loop_entry);
   1333   __ Tbz(exponent_abs, 0, &power_loop);
   1334   __ Fmul(result_double, result_double, scratch1_double);
   1335   __ B(&power_loop);
   1336 
   1337   __ Bind(&power_loop_exit);
   1338 
   1339   // If the exponent was positive, result_double holds the result.
   1340   __ Tbz(exponent_integer, kXSignBit, &done);
   1341 
   1342   // The exponent was negative, so find the inverse.
   1343   __ Fmov(scratch0_double, 1.0);
   1344   __ Fdiv(result_double, scratch0_double, result_double);
   1345   // ECMA-262 only requires Math.pow to return an 'implementation-dependent
   1346   // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
   1347   // to calculate the subnormal value 2^-1074. This method of calculating
   1348   // negative powers doesn't work because 2^1074 overflows to infinity. To
   1349   // catch this corner-case, we bail out if the result was 0. (This can only
   1350   // occur if the divisor is infinity or the base is zero.)
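           // (Math.pow(2, -1074) is such a case: the loop above computes
           // 2^1074 == +Infinity, and 1.0 / +Infinity == 0.0 here.)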
   1351   __ Fcmp(result_double, 0.0);
   1352   __ B(&done, ne);
   1353 
   1354   if (exponent_type_ == ON_STACK) {
   1355     // Bail out to runtime code.
   1356     __ Bind(&call_runtime);
   1357     // Put the arguments back on the stack.
   1358     __ Push(base_tagged, exponent_tagged);
   1359     __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
   1360 
   1361     // Return.
   1362     __ Bind(&done);
   1363     __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
   1364                           result_double);
   1365     ASSERT(result_tagged.is(x0));
   1366     __ IncrementCounter(
   1367         isolate()->counters()->math_pow(), 1, scratch0, scratch1);
   1368     __ Ret();
   1369   } else {
   1370     AllowExternalCallThatCantCauseGC scope(masm);
   1371     __ Mov(saved_lr, lr);
   1372     __ Fmov(base_double, base_double_copy);
   1373     __ Scvtf(exponent_double, exponent_integer);
   1374     __ CallCFunction(
   1375         ExternalReference::power_double_double_function(isolate()),
   1376         0, 2);
   1377     __ Mov(lr, saved_lr);
   1378     __ Bind(&done);
   1379     __ IncrementCounter(
   1380         isolate()->counters()->math_pow(), 1, scratch0, scratch1);
   1381     __ Ret();
   1382   }
   1383 }
   1384 
   1385 
   1386 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   1387   // It is important that the following stubs are generated in this order
   1388   // because pregenerated stubs can only call other pregenerated stubs.
   1389   // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
   1390   // CEntryStub.
   1391   CEntryStub::GenerateAheadOfTime(isolate);
   1392   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   1393   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   1394   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   1395   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   1396   BinaryOpICStub::GenerateAheadOfTime(isolate);
   1397   StoreRegistersStateStub::GenerateAheadOfTime(isolate);
   1398   RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
   1399   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
   1400 }
   1401 
   1402 
   1403 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
   1404   StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
   1405   stub1.GetCode();
   1406   StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
   1407   stub2.GetCode();
   1408 }
   1409 
   1410 
   1411 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
   1412   RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
   1413   stub1.GetCode();
   1414   RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
   1415   stub2.GetCode();
   1416 }
   1417 
   1418 
   1419 void CodeStub::GenerateFPStubs(Isolate* isolate) {
   1420   // Floating-point code doesn't get special handling in ARM64, so there's
   1421   // nothing to do here.
   1422   USE(isolate);
   1423 }
   1424 
   1425 
   1426 bool CEntryStub::NeedsImmovableCode() {
   1427   // CEntryStub stores the return address on the stack before calling into
   1428   // C++ code. In some cases, the VM accesses this address, but it is not used
   1429   // when the C++ code returns to the stub because LR holds the return address
   1430   // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
   1431   // returning to dead code.
   1432   // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
   1433   // find any comment to confirm this, and I don't hit any crashes whatever
    1434   // this function returns. The analysis should be properly confirmed.
   1435   return true;
   1436 }
   1437 
   1438 
   1439 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
   1440   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
   1441   stub.GetCode();
   1442   CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
   1443   stub_fp.GetCode();
   1444 }
   1445 
   1446 
   1447 void CEntryStub::Generate(MacroAssembler* masm) {
   1448   // The Abort mechanism relies on CallRuntime, which in turn relies on
   1449   // CEntryStub, so until this stub has been generated, we have to use a
   1450   // fall-back Abort mechanism.
   1451   //
   1452   // Note that this stub must be generated before any use of Abort.
   1453   MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
   1454 
   1455   ASM_LOCATION("CEntryStub::Generate entry");
   1456   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   1457 
   1458   // Register parameters:
   1459   //    x0: argc (including receiver, untagged)
   1460   //    x1: target
   1461   //
   1462   // The stack on entry holds the arguments and the receiver, with the receiver
   1463   // at the highest address:
   1464   //
    1465   //    jssp[argc-1]: receiver
   1466   //    jssp[argc-2]: arg[argc-2]
   1467   //    ...           ...
   1468   //    jssp[1]:      arg[1]
   1469   //    jssp[0]:      arg[0]
   1470   //
   1471   // The arguments are in reverse order, so that arg[argc-2] is actually the
   1472   // first argument to the target function and arg[0] is the last.
   1473   ASSERT(jssp.Is(__ StackPointer()));
   1474   const Register& argc_input = x0;
   1475   const Register& target_input = x1;
   1476 
   1477   // Calculate argv, argc and the target address, and store them in
   1478   // callee-saved registers so we can retry the call without having to reload
   1479   // these arguments.
   1480   // TODO(jbramley): If the first call attempt succeeds in the common case (as
   1481   // it should), then we might be better off putting these parameters directly
   1482   // into their argument registers, rather than using callee-saved registers and
   1483   // preserving them on the stack.
   1484   const Register& argv = x21;
   1485   const Register& argc = x22;
   1486   const Register& target = x23;
   1487 
   1488   // Derive argv from the stack pointer so that it points to the first argument
   1489   // (arg[argc-2]), or just below the receiver in case there are no arguments.
   1490   //  - Adjust for the arg[] array.
   1491   Register temp_argv = x11;
   1492   __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
   1493   //  - Adjust for the receiver.
   1494   __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
   1495 
   1496   // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
   1497   // registers.
   1498   FrameScope scope(masm, StackFrame::MANUAL);
   1499   __ EnterExitFrame(save_doubles_, x10, 3);
   1500   ASSERT(csp.Is(__ StackPointer()));
   1501 
   1502   // Poke callee-saved registers into reserved space.
   1503   __ Poke(argv, 1 * kPointerSize);
   1504   __ Poke(argc, 2 * kPointerSize);
   1505   __ Poke(target, 3 * kPointerSize);
   1506 
   1507   // We normally only keep tagged values in callee-saved registers, as they
   1508   // could be pushed onto the stack by called stubs and functions, and on the
   1509   // stack they can confuse the GC. However, we're only calling C functions
   1510   // which can push arbitrary data onto the stack anyway, and so the GC won't
   1511   // examine that part of the stack.
   1512   __ Mov(argc, argc_input);
   1513   __ Mov(target, target_input);
   1514   __ Mov(argv, temp_argv);
   1515 
   1516   // x21 : argv
   1517   // x22 : argc
   1518   // x23 : call target
   1519   //
   1520   // The stack (on entry) holds the arguments and the receiver, with the
   1521   // receiver at the highest address:
   1522   //
   1523   //         argv[8]:     receiver
   1524   // argv -> argv[0]:     arg[argc-2]
   1525   //         ...          ...
   1526   //         argv[...]:   arg[1]
   1527   //         argv[...]:   arg[0]
   1528   //
   1529   // Immediately below (after) this is the exit frame, as constructed by
   1530   // EnterExitFrame:
   1531   //         fp[8]:    CallerPC (lr)
   1532   //   fp -> fp[0]:    CallerFP (old fp)
   1533   //         fp[-8]:   Space reserved for SPOffset.
   1534   //         fp[-16]:  CodeObject()
   1535   //         csp[...]: Saved doubles, if saved_doubles is true.
   1536   //         csp[32]:  Alignment padding, if necessary.
   1537   //         csp[24]:  Preserved x23 (used for target).
   1538   //         csp[16]:  Preserved x22 (used for argc).
   1539   //         csp[8]:   Preserved x21 (used for argv).
   1540   //  csp -> csp[0]:   Space reserved for the return address.
   1541   //
   1542   // After a successful call, the exit frame, preserved registers (x21-x23) and
   1543   // the arguments (including the receiver) are dropped or popped as
   1544   // appropriate. The stub then returns.
   1545   //
   1546   // After an unsuccessful call, the exit frame and suchlike are left
    1547   // untouched, and the stub throws an exception by jumping to the
    1548   // exception_returned label.
   1549 
   1550   ASSERT(csp.Is(__ StackPointer()));
   1551 
   1552   // Prepare AAPCS64 arguments to pass to the builtin.
   1553   __ Mov(x0, argc);
   1554   __ Mov(x1, argv);
   1555   __ Mov(x2, ExternalReference::isolate_address(isolate()));
   1556 
   1557   Label return_location;
   1558   __ Adr(x12, &return_location);
   1559   __ Poke(x12, 0);
   1560 
   1561   if (__ emit_debug_code()) {
    1562     // Verify that the slot just below the address stored at fp[kSPOffset]
    1563     // holds the return location, which is currently in x12.
   1564     UseScratchRegisterScope temps(masm);
   1565     Register temp = temps.AcquireX();
   1566     __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
   1567     __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
   1568     __ Cmp(temp, x12);
   1569     __ Check(eq, kReturnAddressNotFoundInFrame);
   1570   }
   1571 
   1572   // Call the builtin.
   1573   __ Blr(target);
   1574   __ Bind(&return_location);
   1575 
   1576   //  x0    result      The return code from the call.
   1577   //  x21   argv
   1578   //  x22   argc
   1579   //  x23   target
   1580   const Register& result = x0;
   1581 
   1582   // Check result for exception sentinel.
   1583   Label exception_returned;
   1584   __ CompareRoot(result, Heap::kExceptionRootIndex);
   1585   __ B(eq, &exception_returned);
   1586 
   1587   // The call succeeded, so unwind the stack and return.
   1588 
   1589   // Restore callee-saved registers x21-x23.
   1590   __ Mov(x11, argc);
   1591 
   1592   __ Peek(argv, 1 * kPointerSize);
   1593   __ Peek(argc, 2 * kPointerSize);
   1594   __ Peek(target, 3 * kPointerSize);
   1595 
   1596   __ LeaveExitFrame(save_doubles_, x10, true);
   1597   ASSERT(jssp.Is(__ StackPointer()));
   1598   // Pop or drop the remaining stack slots and return from the stub.
   1599   //         jssp[24]:    Arguments array (of size argc), including receiver.
   1600   //         jssp[16]:    Preserved x23 (used for target).
   1601   //         jssp[8]:     Preserved x22 (used for argc).
   1602   //         jssp[0]:     Preserved x21 (used for argv).
   1603   __ Drop(x11);
   1604   __ AssertFPCRState();
   1605   __ Ret();
   1606 
   1607   // The stack pointer is still csp if we aren't returning, and the frame
   1608   // hasn't changed (except for the return address).
   1609   __ SetStackPointer(csp);
   1610 
   1611   // Handling of exception.
   1612   __ Bind(&exception_returned);
   1613 
   1614   // Retrieve the pending exception.
   1615   ExternalReference pending_exception_address(
   1616       Isolate::kPendingExceptionAddress, isolate());
   1617   const Register& exception = result;
   1618   const Register& exception_address = x11;
   1619   __ Mov(exception_address, Operand(pending_exception_address));
   1620   __ Ldr(exception, MemOperand(exception_address));
   1621 
   1622   // Clear the pending exception.
   1623   __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
   1624   __ Str(x10, MemOperand(exception_address));
   1625 
   1626   //  x0    exception   The exception descriptor.
   1627   //  x21   argv
   1628   //  x22   argc
   1629   //  x23   target
   1630 
   1631   // Special handling of termination exceptions, which are uncatchable by
   1632   // JavaScript code.
   1633   Label throw_termination_exception;
   1634   __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
   1635   __ B(eq, &throw_termination_exception);
   1636 
   1637   // We didn't execute a return case, so the stack frame hasn't been updated
   1638   // (except for the return address slot). However, we don't need to initialize
   1639   // jssp because the throw method will immediately overwrite it when it
   1640   // unwinds the stack.
   1641   __ SetStackPointer(jssp);
   1642 
   1643   ASM_LOCATION("Throw normal");
   1644   __ Mov(argv, 0);
   1645   __ Mov(argc, 0);
   1646   __ Mov(target, 0);
   1647   __ Throw(x0, x10, x11, x12, x13);
   1648 
   1649   __ Bind(&throw_termination_exception);
   1650   ASM_LOCATION("Throw termination");
   1651   __ Mov(argv, 0);
   1652   __ Mov(argc, 0);
   1653   __ Mov(target, 0);
   1654   __ ThrowUncatchable(x0, x10, x11, x12, x13);
   1655 }
   1656 
   1657 
   1658 // This is the entry point from C++. 5 arguments are provided in x0-x4.
   1659 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
   1660 // Input:
   1661 //   x0: code entry.
   1662 //   x1: function.
   1663 //   x2: receiver.
   1664 //   x3: argc.
   1665 //   x4: argv.
   1666 // Output:
   1667 //   x0: result.
   1668 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   1669   ASSERT(jssp.Is(__ StackPointer()));
   1670   Register code_entry = x0;
   1671 
   1672   // Enable instruction instrumentation. This only works on the simulator, and
   1673   // will have no effect on the model or real hardware.
   1674   __ EnableInstrumentation();
   1675 
   1676   Label invoke, handler_entry, exit;
   1677 
   1678   // Push callee-saved registers and synchronize the system stack pointer (csp)
   1679   // and the JavaScript stack pointer (jssp).
   1680   //
   1681   // We must not write to jssp until after the PushCalleeSavedRegisters()
   1682   // call, since jssp is itself a callee-saved register.
   1683   __ SetStackPointer(csp);
   1684   __ PushCalleeSavedRegisters();
   1685   __ Mov(jssp, csp);
   1686   __ SetStackPointer(jssp);
   1687 
   1688   // Configure the FPCR. We don't restore it, so this is technically not allowed
   1689   // according to AAPCS64. However, we only set default-NaN mode and this will
   1690   // be harmless for most C code. Also, it works for ARM.
   1691   __ ConfigureFPCR();
   1692 
   1693   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   1694 
   1695   // Set up the reserved register for 0.0.
   1696   __ Fmov(fp_zero, 0.0);
   1697 
   1698   // Build an entry frame (see layout below).
   1699   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
   1700   int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
   1701   __ Mov(x13, bad_frame_pointer);
   1702   __ Mov(x12, Smi::FromInt(marker));
   1703   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
   1704   __ Ldr(x10, MemOperand(x11));
   1705 
   1706   __ Push(x13, xzr, x12, x10);
   1707   // Set up fp.
   1708   __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
   1709 
   1710   // Push the JS entry frame marker. Also set js_entry_sp if this is the
   1711   // outermost JS call.
   1712   Label non_outermost_js, done;
   1713   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
   1714   __ Mov(x10, ExternalReference(js_entry_sp));
   1715   __ Ldr(x11, MemOperand(x10));
   1716   __ Cbnz(x11, &non_outermost_js);
   1717   __ Str(fp, MemOperand(x10));
   1718   __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
   1719   __ Push(x12);
   1720   __ B(&done);
   1721   __ Bind(&non_outermost_js);
   1722   // We spare one instruction by pushing xzr since the marker is 0.
   1723   ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
   1724   __ Push(xzr);
   1725   __ Bind(&done);
   1726 
   1727   // The frame set up looks like this:
   1728   // jssp[0] : JS entry frame marker.
   1729   // jssp[1] : C entry FP.
   1730   // jssp[2] : stack frame marker.
    1731   // jssp[3] : stack frame marker.
   1732   // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
   1733 
   1734 
   1735   // Jump to a faked try block that does the invoke, with a faked catch
   1736   // block that sets the pending exception.
   1737   __ B(&invoke);
   1738 
   1739   // Prevent the constant pool from being emitted between the record of the
   1740   // handler_entry position and the first instruction of the sequence here.
   1741   // There is no risk because Assembler::Emit() emits the instruction before
   1742   // checking for constant pool emission, but we do not want to depend on
   1743   // that.
   1744   {
   1745     Assembler::BlockPoolsScope block_pools(masm);
   1746     __ bind(&handler_entry);
   1747     handler_offset_ = handler_entry.pos();
   1748     // Caught exception: Store result (exception) in the pending exception
   1749     // field in the JSEnv and return a failure sentinel. Coming in here the
   1750     // fp will be invalid because the PushTryHandler below sets it to 0 to
   1751     // signal the existence of the JSEntry frame.
   1752     __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1753                                           isolate())));
   1754   }
   1755   __ Str(code_entry, MemOperand(x10));
   1756   __ LoadRoot(x0, Heap::kExceptionRootIndex);
   1757   __ B(&exit);
   1758 
   1759   // Invoke: Link this frame into the handler chain.  There's only one
   1760   // handler block in this code object, so its index is 0.
   1761   __ Bind(&invoke);
   1762   __ PushTryHandler(StackHandler::JS_ENTRY, 0);
   1763   // If an exception not caught by another handler occurs, this handler
   1764   // returns control to the code after the B(&invoke) above, which
   1765   // restores all callee-saved registers (including cp and fp) to their
   1766   // saved values before returning a failure to C.
   1767 
   1768   // Clear any pending exceptions.
   1769   __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
   1770   __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1771                                         isolate())));
   1772   __ Str(x10, MemOperand(x11));
   1773 
   1774   // Invoke the function by calling through the JS entry trampoline builtin.
   1775   // Notice that we cannot store a reference to the trampoline code directly in
   1776   // this stub, because runtime stubs are not traversed when doing GC.
   1777 
   1778   // Expected registers by Builtins::JSEntryTrampoline
   1779   // x0: code entry.
   1780   // x1: function.
   1781   // x2: receiver.
   1782   // x3: argc.
   1783   // x4: argv.
   1784   ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
   1785                                        : Builtins::kJSEntryTrampoline,
   1786                           isolate());
   1787   __ Mov(x10, entry);
   1788 
   1789   // Call the JSEntryTrampoline.
   1790   __ Ldr(x11, MemOperand(x10));  // Dereference the address.
   1791   __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
   1792   __ Blr(x12);
   1793 
   1794   // Unlink this frame from the handler chain.
   1795   __ PopTryHandler();
   1796 
   1797 
   1798   __ Bind(&exit);
   1799   // x0 holds the result.
   1800   // The stack pointer points to the top of the entry frame pushed on entry from
   1801   // C++ (at the beginning of this stub):
   1802   // jssp[0] : JS entry frame marker.
   1803   // jssp[1] : C entry FP.
   1804   // jssp[2] : stack frame marker.
    1805   // jssp[3] : stack frame marker.
   1806   // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
   1807 
   1808   // Check if the current stack frame is marked as the outermost JS frame.
   1809   Label non_outermost_js_2;
   1810   __ Pop(x10);
   1811   __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
   1812   __ B(ne, &non_outermost_js_2);
   1813   __ Mov(x11, ExternalReference(js_entry_sp));
   1814   __ Str(xzr, MemOperand(x11));
   1815   __ Bind(&non_outermost_js_2);
   1816 
   1817   // Restore the top frame descriptors from the stack.
   1818   __ Pop(x10);
   1819   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
   1820   __ Str(x10, MemOperand(x11));
   1821 
   1822   // Reset the stack to the callee saved registers.
   1823   __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
   1824   // Restore the callee-saved registers and return.
   1825   ASSERT(jssp.Is(__ StackPointer()));
   1826   __ Mov(csp, jssp);
   1827   __ SetStackPointer(csp);
   1828   __ PopCalleeSavedRegisters();
   1829   // After this point, we must not modify jssp because it is a callee-saved
   1830   // register which we have just restored.
   1831   __ Ret();
   1832 }
   1833 
   1834 
   1835 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   1836   Label miss;
   1837   Register receiver;
   1838   if (kind() == Code::KEYED_LOAD_IC) {
   1839     // ----------- S t a t e -------------
   1840     //  -- lr    : return address
   1841     //  -- x1    : receiver
   1842     //  -- x0    : key
   1843     // -----------------------------------
   1844     Register key = x0;
   1845     receiver = x1;
   1846     __ Cmp(key, Operand(isolate()->factory()->prototype_string()));
   1847     __ B(ne, &miss);
   1848   } else {
   1849     ASSERT(kind() == Code::LOAD_IC);
   1850     // ----------- S t a t e -------------
   1851     //  -- lr    : return address
   1852     //  -- x2    : name
   1853     //  -- x0    : receiver
   1854     //  -- sp[0] : receiver
   1855     // -----------------------------------
   1856     receiver = x0;
   1857   }
   1858 
   1859   StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
   1860 
   1861   __ Bind(&miss);
   1862   StubCompiler::TailCallBuiltin(masm,
   1863                                 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
   1864 }
   1865 
   1866 
   1867 void InstanceofStub::Generate(MacroAssembler* masm) {
   1868   // Stack on entry:
   1869   // jssp[0]: function.
   1870   // jssp[8]: object.
   1871   //
   1872   // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
   1873   // instanceof.
   1874 
   1875   Register result = x0;
   1876   Register function = right();
   1877   Register object = left();
   1878   Register scratch1 = x6;
   1879   Register scratch2 = x7;
   1880   Register res_true = x8;
   1881   Register res_false = x9;
   1882   // Only used if there was an inline map check site. (See
   1883   // LCodeGen::DoInstanceOfKnownGlobal().)
   1884   Register map_check_site = x4;
   1885   // Delta for the instructions generated between the inline map check and the
   1886   // instruction setting the result.
   1887   const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
   1888 
   1889   Label not_js_object, slow;
   1890 
   1891   if (!HasArgsInRegisters()) {
   1892     __ Pop(function, object);
   1893   }
   1894 
   1895   if (ReturnTrueFalseObject()) {
   1896     __ LoadTrueFalseRoots(res_true, res_false);
   1897   } else {
    1898     // This is counter-intuitive, but correct: the stub returns smi 0 for
             // 'is an instance' and smi 1 for 'is not' (see the header comment).
   1899     __ Mov(res_true, Smi::FromInt(0));
   1900     __ Mov(res_false, Smi::FromInt(1));
   1901   }
   1902 
   1903   // Check that the left hand side is a JS object and load its map as a side
   1904   // effect.
   1905   Register map = x12;
   1906   __ JumpIfSmi(object, &not_js_object);
   1907   __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
   1908 
   1909   // If there is a call site cache, don't look in the global cache, but do the
   1910   // real lookup and update the call site cache.
   1911   if (!HasCallSiteInlineCheck()) {
   1912     Label miss;
   1913     __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
   1914     __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
   1915     __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
   1916     __ Ret();
   1917     __ Bind(&miss);
   1918   }
   1919 
   1920   // Get the prototype of the function.
   1921   Register prototype = x13;
   1922   __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
   1923                              MacroAssembler::kMissOnBoundFunction);
   1924 
   1925   // Check that the function prototype is a JS object.
   1926   __ JumpIfSmi(prototype, &slow);
   1927   __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
   1928 
   1929   // Update the global instanceof or call site inlined cache with the current
   1930   // map and function. The cached answer will be set when it is known below.
   1931   if (HasCallSiteInlineCheck()) {
   1932     // Patch the (relocated) inlined map check.
   1933     __ GetRelocatedValueLocation(map_check_site, scratch1);
   1934     // We have a cell, so need another level of dereferencing.
   1935     __ Ldr(scratch1, MemOperand(scratch1));
   1936     __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
   1937   } else {
   1938     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
   1939     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   1940   }
   1941 
   1942   Label return_true, return_result;
   1943   {
   1944     // Loop through the prototype chain looking for the function prototype.
   1945     Register chain_map = x1;
   1946     Register chain_prototype = x14;
   1947     Register null_value = x15;
   1948     Label loop;
   1949     __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
   1950     __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   1951     // Speculatively set a result.
   1952     __ Mov(result, res_false);
   1953 
   1954     __ Bind(&loop);
   1955 
   1956     // If the chain prototype is the object prototype, return true.
   1957     __ Cmp(chain_prototype, prototype);
   1958     __ B(eq, &return_true);
   1959 
   1960     // If the chain prototype is null, we've reached the end of the chain, so
   1961     // return false.
   1962     __ Cmp(chain_prototype, null_value);
   1963     __ B(eq, &return_result);
   1964 
   1965     // Otherwise, load the next prototype in the chain, and loop.
   1966     __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
   1967     __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
   1968     __ B(&loop);
   1969   }
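           // The loop above is the usual instanceof prototype chain walk.
           // Roughly, as C++-like pseudocode (illustrative names, not the V8
           // API):
           //
           //   for (Object* p = map->prototype(); ; ) {
           //     if (p == prototype) return res_true;
           //     if (p == null_value) return res_false;
           //     p = p->map()->prototype();
           //   }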
   1970 
   1971   // Return sequence when no arguments are on the stack.
   1972   // We cannot fall through to here.
   1973   __ Bind(&return_true);
   1974   __ Mov(result, res_true);
   1975   __ Bind(&return_result);
   1976   if (HasCallSiteInlineCheck()) {
   1977     ASSERT(ReturnTrueFalseObject());
   1978     __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
   1979     __ GetRelocatedValueLocation(map_check_site, scratch2);
   1980     __ Str(result, MemOperand(scratch2));
   1981   } else {
   1982     __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
   1983   }
   1984   __ Ret();
   1985 
   1986   Label object_not_null, object_not_null_or_smi;
   1987 
   1988   __ Bind(&not_js_object);
   1989   Register object_type = x14;
   1990   //   x0   result        result return register (uninit)
   1991   //   x10  function      pointer to function
   1992   //   x11  object        pointer to object
   1993   //   x14  object_type   type of object (uninit)
   1994 
   1995   // Before null, smi and string checks, check that the rhs is a function.
   1996   // For a non-function rhs, an exception must be thrown.
   1997   __ JumpIfSmi(function, &slow);
   1998   __ JumpIfNotObjectType(
   1999       function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
   2000 
   2001   __ Mov(result, res_false);
   2002 
   2003   // Null is not instance of anything.
    2004   __ Cmp(object, Operand(isolate()->factory()->null_value()));
   2005   __ B(ne, &object_not_null);
   2006   __ Ret();
   2007 
   2008   __ Bind(&object_not_null);
   2009   // Smi values are not instances of anything.
   2010   __ JumpIfNotSmi(object, &object_not_null_or_smi);
   2011   __ Ret();
   2012 
   2013   __ Bind(&object_not_null_or_smi);
   2014   // String values are not instances of anything.
   2015   __ IsObjectJSStringType(object, scratch2, &slow);
   2016   __ Ret();
   2017 
   2018   // Slow-case. Tail call builtin.
   2019   __ Bind(&slow);
   2020   {
   2021     FrameScope scope(masm, StackFrame::INTERNAL);
   2022     // Arguments have either been passed into registers or have been previously
   2023     // popped. We need to push them before calling builtin.
   2024     __ Push(object, function);
   2025     __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
   2026   }
   2027   if (ReturnTrueFalseObject()) {
   2028     // Reload true/false because they were clobbered in the builtin call.
   2029     __ LoadTrueFalseRoots(res_true, res_false);
   2030     __ Cmp(result, 0);
   2031     __ Csel(result, res_true, res_false, eq);
   2032   }
   2033   __ Ret();
   2034 }
   2035 
   2036 
   2037 Register InstanceofStub::left() {
   2038   // Object to check (instanceof lhs).
   2039   return x11;
   2040 }
   2041 
   2042 
   2043 Register InstanceofStub::right() {
   2044   // Constructor function (instanceof rhs).
   2045   return x10;
   2046 }
   2047 
   2048 
   2049 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   2050   Register arg_count = x0;
   2051   Register key = x1;
   2052 
   2053   // The displacement is the offset of the last parameter (if any) relative
   2054   // to the frame pointer.
   2055   static const int kDisplacement =
   2056       StandardFrameConstants::kCallerSPOffset - kPointerSize;
   2057 
   2058   // Check that the key is a smi.
   2059   Label slow;
   2060   __ JumpIfNotSmi(key, &slow);
   2061 
   2062   // Check if the calling frame is an arguments adaptor frame.
   2063   Register local_fp = x11;
   2064   Register caller_fp = x11;
   2065   Register caller_ctx = x12;
   2066   Label skip_adaptor;
   2067   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   2068   __ Ldr(caller_ctx, MemOperand(caller_fp,
   2069                                 StandardFrameConstants::kContextOffset));
   2070   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   2071   __ Csel(local_fp, fp, caller_fp, ne);
   2072   __ B(ne, &skip_adaptor);
   2073 
   2074   // Load the actual arguments limit found in the arguments adaptor frame.
   2075   __ Ldr(arg_count, MemOperand(caller_fp,
   2076                                ArgumentsAdaptorFrameConstants::kLengthOffset));
   2077   __ Bind(&skip_adaptor);
   2078 
   2079   // Check index against formal parameters count limit. Use unsigned comparison
   2080   // to get negative check for free: branch if key < 0 or key >= arg_count.
   2081   __ Cmp(key, arg_count);
   2082   __ B(hs, &slow);
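           // In C terms, the unsigned trick above is (sketch):
           //
           //   if ((uint64_t)key >= (uint64_t)arg_count) goto slow;
           //
           // A negative key wraps around to a huge unsigned value, so a single
           // 'hs' branch covers both bounds.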
   2083 
   2084   // Read the argument from the stack and return it.
   2085   __ Sub(x10, arg_count, key);
   2086   __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
   2087   __ Ldr(x0, MemOperand(x10, kDisplacement));
   2088   __ Ret();
   2089 
   2090   // Slow case: handle non-smi or out-of-bounds access to arguments by calling
   2091   // the runtime system.
   2092   __ Bind(&slow);
   2093   __ Push(key);
   2094   __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
   2095 }
   2096 
   2097 
   2098 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   2099   // Stack layout on entry.
   2100   //  jssp[0]:  number of parameters (tagged)
   2101   //  jssp[8]:  address of receiver argument
   2102   //  jssp[16]: function
   2103 
   2104   // Check if the calling frame is an arguments adaptor frame.
   2105   Label runtime;
   2106   Register caller_fp = x10;
   2107   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   2108   // Load and untag the context.
   2109   STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
   2110   __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
   2111                          (kSmiShift / kBitsPerByte)));
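           // (A smi keeps its payload in the upper 32 bits, so on little-endian
           // ARM64 loading just the high word of the slot yields the untagged
           // value.)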
   2112   __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
   2113   __ B(ne, &runtime);
   2114 
   2115   // Patch the arguments.length and parameters pointer in the current frame.
   2116   __ Ldr(x11, MemOperand(caller_fp,
   2117                          ArgumentsAdaptorFrameConstants::kLengthOffset));
   2118   __ Poke(x11, 0 * kXRegSize);
   2119   __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
   2120   __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
   2121   __ Poke(x10, 1 * kXRegSize);
   2122 
   2123   __ Bind(&runtime);
   2124   __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
   2125 }
   2126 
   2127 
   2128 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   2129   // Stack layout on entry.
   2130   //  jssp[0]:  number of parameters (tagged)
   2131   //  jssp[8]:  address of receiver argument
   2132   //  jssp[16]: function
   2133   //
   2134   // Returns pointer to result object in x0.
   2135 
   2136   // Note: arg_count_smi is an alias of param_count_smi.
   2137   Register arg_count_smi = x3;
   2138   Register param_count_smi = x3;
   2139   Register param_count = x7;
   2140   Register recv_arg = x14;
   2141   Register function = x4;
   2142   __ Pop(param_count_smi, recv_arg, function);
   2143   __ SmiUntag(param_count, param_count_smi);
   2144 
   2145   // Check if the calling frame is an arguments adaptor frame.
   2146   Register caller_fp = x11;
   2147   Register caller_ctx = x12;
   2148   Label runtime;
   2149   Label adaptor_frame, try_allocate;
   2150   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   2151   __ Ldr(caller_ctx, MemOperand(caller_fp,
   2152                                 StandardFrameConstants::kContextOffset));
   2153   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   2154   __ B(eq, &adaptor_frame);
   2155 
   2156   // No adaptor, parameter count = argument count.
   2157 
   2158   //   x1   mapped_params number of mapped params, min(params, args) (uninit)
   2159   //   x2   arg_count     number of function arguments (uninit)
   2160   //   x3   arg_count_smi number of function arguments (smi)
   2161   //   x4   function      function pointer
   2162   //   x7   param_count   number of function parameters
   2163   //   x11  caller_fp     caller's frame pointer
   2164   //   x14  recv_arg      pointer to receiver arguments
   2165 
   2166   Register arg_count = x2;
   2167   __ Mov(arg_count, param_count);
   2168   __ B(&try_allocate);
   2169 
   2170   // We have an adaptor frame. Patch the parameters pointer.
   2171   __ Bind(&adaptor_frame);
   2172   __ Ldr(arg_count_smi,
   2173          MemOperand(caller_fp,
   2174                     ArgumentsAdaptorFrameConstants::kLengthOffset));
   2175   __ SmiUntag(arg_count, arg_count_smi);
   2176   __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
   2177   __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
   2178 
   2179   // Compute the mapped parameter count = min(param_count, arg_count)
   2180   Register mapped_params = x1;
   2181   __ Cmp(param_count, arg_count);
   2182   __ Csel(mapped_params, param_count, arg_count, lt);
   2183 
   2184   __ Bind(&try_allocate);
   2185 
   2186   //   x0   alloc_obj     pointer to allocated objects: param map, backing
   2187   //                      store, arguments (uninit)
   2188   //   x1   mapped_params number of mapped parameters, min(params, args)
   2189   //   x2   arg_count     number of function arguments
   2190   //   x3   arg_count_smi number of function arguments (smi)
   2191   //   x4   function      function pointer
   2192   //   x7   param_count   number of function parameters
   2193   //   x10  size          size of objects to allocate (uninit)
   2194   //   x14  recv_arg      pointer to receiver arguments
   2195 
   2196   // Compute the size of backing store, parameter map, and arguments object.
   2197   // 1. Parameter map, has two extra words containing context and backing
   2198   // store.
   2199   const int kParameterMapHeaderSize =
   2200       FixedArray::kHeaderSize + 2 * kPointerSize;
   2201 
   2202   // Calculate the parameter map size, assuming it exists.
   2203   Register size = x10;
   2204   __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
   2205   __ Add(size, size, kParameterMapHeaderSize);
   2206 
   2207   // If there are no mapped parameters, set the running size total to zero.
   2208   // Otherwise, use the parameter map size calculated earlier.
   2209   __ Cmp(mapped_params, 0);
   2210   __ CzeroX(size, eq);
   2211 
   2212   // 2. Add the size of the backing store and arguments object.
   2213   __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
   2214   __ Add(size, size,
   2215          FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
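           // In C terms, the size computed above is (sketch):
           //
           //   size = (mapped_params == 0)
           //              ? 0
           //              : mapped_params * kPointerSize + kParameterMapHeaderSize;
           //   size += arg_count * kPointerSize +
           //           FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize;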
   2216 
   2217   // Do the allocation of all three objects in one go. Assign this to x0, as it
   2218   // will be returned to the caller.
   2219   Register alloc_obj = x0;
   2220   __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
   2221 
   2222   // Get the arguments boilerplate from the current (global) context.
   2223 
   2224   //   x0   alloc_obj     pointer to allocated objects (param map, backing
   2225   //                      store, arguments)
   2226   //   x1   mapped_params number of mapped parameters, min(params, args)
   2227   //   x2   arg_count     number of function arguments
   2228   //   x3   arg_count_smi number of function arguments (smi)
   2229   //   x4   function      function pointer
   2230   //   x7   param_count   number of function parameters
   2231   //   x11  args_offset   offset to args (or aliased args) boilerplate (uninit)
   2232   //   x14  recv_arg      pointer to receiver arguments
   2233 
   2234   Register global_object = x10;
   2235   Register global_ctx = x10;
   2236   Register args_offset = x11;
   2237   Register aliased_args_offset = x10;
   2238   __ Ldr(global_object, GlobalObjectMemOperand());
   2239   __ Ldr(global_ctx, FieldMemOperand(global_object,
   2240                                      GlobalObject::kNativeContextOffset));
   2241 
   2242   __ Ldr(args_offset,
   2243          ContextMemOperand(global_ctx,
   2244                            Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
   2245   __ Ldr(aliased_args_offset,
   2246          ContextMemOperand(global_ctx,
   2247                            Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
   2248   __ Cmp(mapped_params, 0);
   2249   __ CmovX(args_offset, aliased_args_offset, ne);
   2250 
   2251   // Copy the JS object part.
   2252   __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
   2253                 JSObject::kHeaderSize / kPointerSize);
   2254 
   2255   // Set up the callee in-object property.
   2256   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   2257   const int kCalleeOffset = JSObject::kHeaderSize +
   2258                             Heap::kArgumentsCalleeIndex * kPointerSize;
   2259   __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
   2260 
   2261   // Use the length and set that as an in-object property.
   2262   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   2263   const int kLengthOffset = JSObject::kHeaderSize +
   2264                             Heap::kArgumentsLengthIndex * kPointerSize;
   2265   __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
   2266 
   2267   // Set up the elements pointer in the allocated arguments object.
   2268   // If we allocated a parameter map, "elements" will point there, otherwise
   2269   // it will point to the backing store.
   2270 
   2271   //   x0   alloc_obj     pointer to allocated objects (param map, backing
   2272   //                      store, arguments)
   2273   //   x1   mapped_params number of mapped parameters, min(params, args)
   2274   //   x2   arg_count     number of function arguments
   2275   //   x3   arg_count_smi number of function arguments (smi)
   2276   //   x4   function      function pointer
   2277   //   x5   elements      pointer to parameter map or backing store (uninit)
   2278   //   x6   backing_store pointer to backing store (uninit)
   2279   //   x7   param_count   number of function parameters
   2280   //   x14  recv_arg      pointer to receiver arguments
   2281 
   2282   Register elements = x5;
   2283   __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
   2284   __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
   2285 
   2286   // Initialize parameter map. If there are no mapped arguments, we're done.
   2287   Label skip_parameter_map;
   2288   __ Cmp(mapped_params, 0);
   2289   // Set up backing store address, because it is needed later for filling in
   2290   // the unmapped arguments.
   2291   Register backing_store = x6;
   2292   __ CmovX(backing_store, elements, eq);
   2293   __ B(eq, &skip_parameter_map);
   2294 
   2295   __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
   2296   __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
   2297   __ Add(x10, mapped_params, 2);
   2298   __ SmiTag(x10);
   2299   __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
   2300   __ Str(cp, FieldMemOperand(elements,
   2301                              FixedArray::kHeaderSize + 0 * kPointerSize));
   2302   __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
   2303   __ Add(x10, x10, kParameterMapHeaderSize);
   2304   __ Str(x10, FieldMemOperand(elements,
   2305                               FixedArray::kHeaderSize + 1 * kPointerSize));
   2306 
   2307   // Copy the parameter slots and the holes in the arguments.
   2308   // We need to fill in mapped_parameter_count slots. Then index the context,
   2309   // where parameters are stored in reverse order, at:
   2310   //
   2311   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
   2312   //
   2313   // The mapped parameter thus needs to get indices:
   2314   //
   2315   //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
   2316   //     MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
   2317   //
   2318   // We loop from right to left.
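           // For example, with param_count == 3 and mapped_params == 2, the
           // loop stores the (smi-tagged) context indices MIN_CONTEXT_SLOTS + 1
           // and MIN_CONTEXT_SLOTS + 2 into the parameter map, and a hole into
           // each of the two corresponding backing store slots.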
   2319 
   2320   //   x0   alloc_obj     pointer to allocated objects (param map, backing
   2321   //                      store, arguments)
   2322   //   x1   mapped_params number of mapped parameters, min(params, args)
   2323   //   x2   arg_count     number of function arguments
   2324   //   x3   arg_count_smi number of function arguments (smi)
   2325   //   x4   function      function pointer
   2326   //   x5   elements      pointer to parameter map or backing store (uninit)
   2327   //   x6   backing_store pointer to backing store (uninit)
   2328   //   x7   param_count   number of function parameters
   2329   //   x11  loop_count    parameter loop counter (uninit)
   2330   //   x12  index         parameter index (smi, uninit)
   2331   //   x13  the_hole      hole value (uninit)
   2332   //   x14  recv_arg      pointer to receiver arguments
   2333 
   2334   Register loop_count = x11;
   2335   Register index = x12;
   2336   Register the_hole = x13;
   2337   Label parameters_loop, parameters_test;
   2338   __ Mov(loop_count, mapped_params);
   2339   __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
   2340   __ Sub(index, index, mapped_params);
   2341   __ SmiTag(index);
   2342   __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
   2343   __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
   2344   __ Add(backing_store, backing_store, kParameterMapHeaderSize);
   2345 
   2346   __ B(&parameters_test);
   2347 
   2348   __ Bind(&parameters_loop);
   2349   __ Sub(loop_count, loop_count, 1);
   2350   __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
   2351   __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
   2352   __ Str(index, MemOperand(elements, x10));
   2353   __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
   2354   __ Str(the_hole, MemOperand(backing_store, x10));
   2355   __ Add(index, index, Smi::FromInt(1));
   2356   __ Bind(&parameters_test);
   2357   __ Cbnz(loop_count, &parameters_loop);
   2358 
   2359   __ Bind(&skip_parameter_map);
    2360   // Copy arguments header and remaining slots (if there are any).
   2361   __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
   2362   __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
   2363   __ Str(arg_count_smi, FieldMemOperand(backing_store,
   2364                                         FixedArray::kLengthOffset));
   2365 
   2366   //   x0   alloc_obj     pointer to allocated objects (param map, backing
   2367   //                      store, arguments)
   2368   //   x1   mapped_params number of mapped parameters, min(params, args)
   2369   //   x2   arg_count     number of function arguments
   2370   //   x4   function      function pointer
   2371   //   x3   arg_count_smi number of function arguments (smi)
   2372   //   x6   backing_store pointer to backing store (uninit)
   2373   //   x14  recv_arg      pointer to receiver arguments
   2374 
   2375   Label arguments_loop, arguments_test;
   2376   __ Mov(x10, mapped_params);
   2377   __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
   2378   __ B(&arguments_test);
   2379 
   2380   __ Bind(&arguments_loop);
   2381   __ Sub(recv_arg, recv_arg, kPointerSize);
   2382   __ Ldr(x11, MemOperand(recv_arg));
   2383   __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
   2384   __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
   2385   __ Add(x10, x10, 1);
   2386 
   2387   __ Bind(&arguments_test);
   2388   __ Cmp(x10, arg_count);
   2389   __ B(lt, &arguments_loop);
   2390 
   2391   __ Ret();
   2392 
   2393   // Do the runtime call to allocate the arguments object.
   2394   __ Bind(&runtime);
   2395   __ Push(function, recv_arg, arg_count_smi);
   2396   __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
   2397 }
   2398 
   2399 
   2400 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   2401   // Stack layout on entry.
   2402   //  jssp[0]:  number of parameters (tagged)
   2403   //  jssp[8]:  address of receiver argument
   2404   //  jssp[16]: function
   2405   //
   2406   // Returns pointer to result object in x0.
   2407 
   2408   // Get the stub arguments from the frame, and make an untagged copy of the
   2409   // parameter count.
   2410   Register param_count_smi = x1;
   2411   Register params = x2;
   2412   Register function = x3;
   2413   Register param_count = x13;
   2414   __ Pop(param_count_smi, params, function);
   2415   __ SmiUntag(param_count, param_count_smi);
   2416 
   2417   // Test if arguments adaptor needed.
   2418   Register caller_fp = x11;
   2419   Register caller_ctx = x12;
   2420   Label try_allocate, runtime;
   2421   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   2422   __ Ldr(caller_ctx, MemOperand(caller_fp,
   2423                                 StandardFrameConstants::kContextOffset));
   2424   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   2425   __ B(ne, &try_allocate);
   2426 
   2427   //   x1   param_count_smi   number of parameters passed to function (smi)
   2428   //   x2   params            pointer to parameters
   2429   //   x3   function          function pointer
   2430   //   x11  caller_fp         caller's frame pointer
   2431   //   x13  param_count       number of parameters passed to function
   2432 
   2433   // Patch the argument length and parameters pointer.
   2434   __ Ldr(param_count_smi,
   2435          MemOperand(caller_fp,
   2436                     ArgumentsAdaptorFrameConstants::kLengthOffset));
   2437   __ SmiUntag(param_count, param_count_smi);
   2438   __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
   2439   __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
   2440 
   2441   // Try the new space allocation. Start out with computing the size of the
   2442   // arguments object and the elements array in words.
   2443   Register size = x10;
   2444   __ Bind(&try_allocate);
   2445   __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
   2446   __ Cmp(param_count, 0);
   2447   __ CzeroX(size, eq);
   2448   __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
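           // In C terms (sketch; sizes are in words because of SIZE_IN_WORDS):
           //
           //   size = (param_count == 0)
           //              ? 0
           //              : param_count + FixedArray::kHeaderSize / kPointerSize;
           //   size += Heap::kStrictArgumentsObjectSize / kPointerSize;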
   2449 
   2450   // Do the allocation of both objects in one go. Assign this to x0, as it will
   2451   // be returned to the caller.
   2452   Register alloc_obj = x0;
   2453   __ Allocate(size, alloc_obj, x11, x12, &runtime,
   2454               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
   2455 
   2456   // Get the arguments boilerplate from the current (native) context.
   2457   Register global_object = x10;
   2458   Register global_ctx = x10;
   2459   Register args_offset = x4;
   2460   __ Ldr(global_object, GlobalObjectMemOperand());
   2461   __ Ldr(global_ctx, FieldMemOperand(global_object,
   2462                                      GlobalObject::kNativeContextOffset));
   2463   __ Ldr(args_offset,
   2464          ContextMemOperand(global_ctx,
   2465                            Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
   2466 
   2467   //   x0   alloc_obj         pointer to allocated objects: parameter array and
   2468   //                          arguments object
   2469   //   x1   param_count_smi   number of parameters passed to function (smi)
   2470   //   x2   params            pointer to parameters
   2471   //   x3   function          function pointer
   2472   //   x4   args_offset       offset to arguments boilerplate
   2473   //   x13  param_count       number of parameters passed to function
   2474 
   2475   // Copy the JS object part.
   2476   __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
   2477                 JSObject::kHeaderSize / kPointerSize);
   2478 
   2479   // Set the smi-tagged length as an in-object property.
   2480   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   2481   const int kLengthOffset = JSObject::kHeaderSize +
   2482                             Heap::kArgumentsLengthIndex * kPointerSize;
   2483   __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
   2484 
   2485   // If there are no actual arguments, we're done.
   2486   Label done;
   2487   __ Cbz(param_count, &done);
   2488 
   2489   // Set up the elements pointer in the allocated arguments object and
   2490   // initialize the header in the elements fixed array.
   2491   Register elements = x5;
   2492   __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
   2493   __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
   2494   __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
   2495   __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
   2496   __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
   2497 
   2498   //   x0   alloc_obj         pointer to allocated objects: parameter array and
   2499   //                          arguments object
   2500   //   x1   param_count_smi   number of parameters passed to function (smi)
   2501   //   x2   params            pointer to parameters
   2502   //   x3   function          function pointer
   2503   //   x4   array             pointer to array slot (uninit)
   2504   //   x5   elements          pointer to elements array of alloc_obj
   2505   //   x13  param_count       number of parameters passed to function
   2506 
   2507   // Copy the fixed array slots.
   2508   Label loop;
   2509   Register array = x4;
   2510   // Set up pointer to first array slot.
   2511   __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
   2512 
   2513   __ Bind(&loop);
   2514   // Pre-decrement the parameters pointer by kPointerSize on each iteration.
   2515   // Pre-decrement in order to skip receiver.
   2516   __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
   2517   // Post-increment elements by kPointerSize on each iteration.
   2518   __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
   2519   __ Sub(param_count, param_count, 1);
   2520   __ Cbnz(param_count, &loop);
   2521 
   2522   // Return from stub.
   2523   __ Bind(&done);
   2524   __ Ret();
   2525 
   2526   // Do the runtime call to allocate the arguments object.
   2527   __ Bind(&runtime);
   2528   __ Push(function, params, param_count_smi);
   2529   __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
   2530 }
   2531 
   2532 
   2533 void RegExpExecStub::Generate(MacroAssembler* masm) {
   2534 #ifdef V8_INTERPRETED_REGEXP
   2535   __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
   2536 #else  // V8_INTERPRETED_REGEXP
   2537 
   2538   // Stack frame on entry.
   2539   //  jssp[0]: last_match_info (expected JSArray)
   2540   //  jssp[8]: previous index
   2541   //  jssp[16]: subject string
   2542   //  jssp[24]: JSRegExp object
   2543   Label runtime;
   2544 
   2545   // Use of registers for this function.
   2546 
   2547   // Variable registers:
   2548   //   x10-x13                                  used as scratch registers
   2549   //   w0       string_type                     type of subject string
   2550   //   x2       jsstring_length                 subject string length
   2551   //   x3       jsregexp_object                 JSRegExp object
   2552   //   w4       string_encoding                 ASCII or UC16
   2553   //   w5       sliced_string_offset            if the string is a SlicedString
   2554   //                                            offset to the underlying string
   2555   //   w6       string_representation           groups attributes of the string:
   2556   //                                              - is a string
   2557   //                                              - type of the string
   2558   //                                              - is a short external string
   2559   Register string_type = w0;
   2560   Register jsstring_length = x2;
   2561   Register jsregexp_object = x3;
   2562   Register string_encoding = w4;
   2563   Register sliced_string_offset = w5;
   2564   Register string_representation = w6;
   2565 
    2566   // These are in callee-saved registers and will be preserved by the call
    2567   // to the native RegExp code, as this code is called using the normal
    2568   // C calling convention. When calling directly from generated code the
    2569   // native RegExp code will not do a GC and therefore the contents of
    2570   // these registers are safe to use after the call.
   2571 
   2572   //   x19       subject                        subject string
   2573   //   x20       regexp_data                    RegExp data (FixedArray)
   2574   //   x21       last_match_info_elements       info relative to the last match
   2575   //                                            (FixedArray)
   2576   //   x22       code_object                    generated regexp code
   2577   Register subject = x19;
   2578   Register regexp_data = x20;
   2579   Register last_match_info_elements = x21;
   2580   Register code_object = x22;
   2581 
   2582   // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
   2583   CPURegList used_callee_saved_registers(subject,
   2584                                          regexp_data,
   2585                                          last_match_info_elements,
   2586                                          code_object);
   2587   __ PushCPURegList(used_callee_saved_registers);
   2588 
   2589   // Stack frame.
   2590   //  jssp[0] : x19
   2591   //  jssp[8] : x20
   2592   //  jssp[16]: x21
   2593   //  jssp[24]: x22
   2594   //  jssp[32]: last_match_info (JSArray)
   2595   //  jssp[40]: previous index
   2596   //  jssp[48]: subject string
   2597   //  jssp[56]: JSRegExp object
   2598 
   2599   const int kLastMatchInfoOffset = 4 * kPointerSize;
   2600   const int kPreviousIndexOffset = 5 * kPointerSize;
   2601   const int kSubjectOffset = 6 * kPointerSize;
   2602   const int kJSRegExpOffset = 7 * kPointerSize;
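             // These offsets account for the four callee-saved registers just
             // pushed: the stub's original arguments now sit 4 * kPointerSize
             // further up the stack, as shown in the frame layout above.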
   2603 
   2604   // Ensure that a RegExp stack is allocated.
   2605   ExternalReference address_of_regexp_stack_memory_address =
   2606       ExternalReference::address_of_regexp_stack_memory_address(isolate());
   2607   ExternalReference address_of_regexp_stack_memory_size =
   2608       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   2609   __ Mov(x10, address_of_regexp_stack_memory_size);
   2610   __ Ldr(x10, MemOperand(x10));
   2611   __ Cbz(x10, &runtime);
   2612 
   2613   // Check that the first argument is a JSRegExp object.
   2614   ASSERT(jssp.Is(__ StackPointer()));
   2615   __ Peek(jsregexp_object, kJSRegExpOffset);
   2616   __ JumpIfSmi(jsregexp_object, &runtime);
   2617   __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
   2618 
   2619   // Check that the RegExp has been compiled (data contains a fixed array).
   2620   __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
   2621   if (FLAG_debug_code) {
   2622     STATIC_ASSERT(kSmiTag == 0);
   2623     __ Tst(regexp_data, kSmiTagMask);
   2624     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   2625     __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
   2626     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   2627   }
   2628 
   2629   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   2630   __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   2631   __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
   2632   __ B(ne, &runtime);
   2633 
    2634   // Check that the number of captures fits in the static offsets vector
    2635   // buffer. We always have at least one capture for the whole match, plus
    2636   // additional ones due to capturing parentheses. A capture takes 2
    2637   // registers, so the number of capture registers is (captures + 1) * 2.
   2638   __ Ldrsw(x10,
   2639            UntagSmiFieldMemOperand(regexp_data,
   2640                                    JSRegExp::kIrregexpCaptureCountOffset));
   2641   // Check (number_of_captures + 1) * 2 <= offsets vector size
   2642   //             number_of_captures * 2 <= offsets vector size - 2
   2643   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   2644   __ Add(x10, x10, x10);
   2645   __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
   2646   __ B(hi, &runtime);
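             // Worked example: a pattern with 2 capturing groups has
             // number_of_captures == 2, so x10 holds 2 + 2 = 4, and the check
             // 4 <= vector_size - 2 is equivalent to (2 + 1) * 2 <= vector_size.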
   2647 
   2648   // Initialize offset for possibly sliced string.
   2649   __ Mov(sliced_string_offset, 0);
   2650 
   2651   ASSERT(jssp.Is(__ StackPointer()));
   2652   __ Peek(subject, kSubjectOffset);
   2653   __ JumpIfSmi(subject, &runtime);
   2654 
   2655   __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
   2656   __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   2657 
   2658   __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
   2659 
   2660   // Handle subject string according to its encoding and representation:
   2661   // (1) Sequential string?  If yes, go to (5).
   2662   // (2) Anything but sequential or cons?  If yes, go to (6).
   2663   // (3) Cons string.  If the string is flat, replace subject with first string.
   2664   //     Otherwise bailout.
   2665   // (4) Is subject external?  If yes, go to (7).
   2666   // (5) Sequential string.  Load regexp code according to encoding.
   2667   // (E) Carry on.
   2668   /// [...]
   2669 
   2670   // Deferred code at the end of the stub:
   2671   // (6) Not a long external string?  If yes, go to (8).
   2672   // (7) External string.  Make it, offset-wise, look like a sequential string.
   2673   //     Go to (5).
   2674   // (8) Short external string or not a string?  If yes, bail out to runtime.
   2675   // (9) Sliced string.  Replace subject with parent.  Go to (4).
   2676 
   2677   Label check_underlying;   // (4)
   2678   Label seq_string;         // (5)
   2679   Label not_seq_nor_cons;   // (6)
   2680   Label external_string;    // (7)
   2681   Label not_long_external;  // (8)
   2682 
   2683   // (1) Sequential string?  If yes, go to (5).
   2684   __ And(string_representation,
   2685          string_type,
   2686          kIsNotStringMask |
   2687              kStringRepresentationMask |
   2688              kShortExternalStringMask);
    2689   // We rely on the fact that the instance types of strings that are
    2690   // sequential (SeqString) and not short external strings match
    2691   // the following pattern:
   2692   //   string_type: 0XX0 XX00
   2693   //                ^  ^   ^^
   2694   //                |  |   ||
   2695   //                |  |   is a SeqString
   2696   //                |  is not a short external String
   2697   //                is a String
   2698   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   2699   STATIC_ASSERT(kShortExternalStringTag != 0);
   2700   __ Cbz(string_representation, &seq_string);  // Go to (5).
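             // Given the asserted tag layout, string_representation is zero
             // exactly when the value is a string, is sequential, and is not a
             // short external string, so a single Cbz performs all three checks.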
   2701 
   2702   // (2) Anything but sequential or cons?  If yes, go to (6).
   2703   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   2704   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   2705   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   2706   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   2707   __ Cmp(string_representation, kExternalStringTag);
   2708   __ B(ge, &not_seq_nor_cons);  // Go to (6).
   2709 
   2710   // (3) Cons string.  Check that it's flat.
   2711   __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
   2712   __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
   2713   // Replace subject with first string.
   2714   __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   2715 
   2716   // (4) Is subject external?  If yes, go to (7).
   2717   __ Bind(&check_underlying);
   2718   // Reload the string type.
   2719   __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
   2720   __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   2721   STATIC_ASSERT(kSeqStringTag == 0);
   2722   // The underlying external string is never a short external string.
   2723   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
   2724   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
   2725   __ TestAndBranchIfAnySet(string_type.X(),
   2726                            kStringRepresentationMask,
   2727                            &external_string);  // Go to (7).
   2728 
   2729   // (5) Sequential string.  Load regexp code according to encoding.
   2730   __ Bind(&seq_string);
   2731 
   2732   // Check that the third argument is a positive smi less than the subject
   2733   // string length. A negative value will be greater (unsigned comparison).
   2734   ASSERT(jssp.Is(__ StackPointer()));
   2735   __ Peek(x10, kPreviousIndexOffset);
   2736   __ JumpIfNotSmi(x10, &runtime);
   2737   __ Cmp(jsstring_length, x10);
   2738   __ B(ls, &runtime);
   2739 
   2740   // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
   2741   // before entering the exit frame.
   2742   __ SmiUntag(x1, x10);
   2743 
   2744   // The third bit determines the string encoding in string_type.
   2745   STATIC_ASSERT(kOneByteStringTag == 0x04);
   2746   STATIC_ASSERT(kTwoByteStringTag == 0x00);
   2747   STATIC_ASSERT(kStringEncodingMask == 0x04);
   2748 
   2749   // Find the code object based on the assumptions above.
    2750   // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an
    2751   // offset of kPointerSize to reach the latter.
   2752   ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
   2753             JSRegExp::kDataUC16CodeOffset);
   2754   __ Mov(x10, kPointerSize);
   2755   // We will need the encoding later: ASCII = 0x04
   2756   //                                  UC16  = 0x00
   2757   __ Ands(string_encoding, string_type, kStringEncodingMask);
   2758   __ CzeroX(x10, ne);
   2759   __ Add(x10, regexp_data, x10);
   2760   __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
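             // Selection recap: for ASCII strings the encoding bit is set, so
             // the Ands sets 'ne' and CzeroX clears x10, and we load from
             // kDataAsciiCodeOffset; for UC16 strings x10 keeps kPointerSize
             // and the same load reaches kDataUC16CodeOffset.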
   2761 
   2762   // (E) Carry on.  String handling is done.
   2763 
   2764   // Check that the irregexp code has been generated for the actual string
    2765   // encoding. If it has, the field contains a code object; otherwise it
    2766   // contains a smi (code flushing support).
   2767   __ JumpIfSmi(code_object, &runtime);
   2768 
   2769   // All checks done. Now push arguments for native regexp code.
   2770   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
   2771                       x10,
   2772                       x11);
   2773 
   2774   // Isolates: note we add an additional parameter here (isolate pointer).
   2775   __ EnterExitFrame(false, x10, 1);
   2776   ASSERT(csp.Is(__ StackPointer()));
   2777 
    2778   // We have 9 arguments to pass to the regexp code, so we pass one on
    2779   // the stack and the rest in registers.
   2780 
   2781   // Note that the placement of the argument on the stack isn't standard
   2782   // AAPCS64:
   2783   // csp[0]: Space for the return address placed by DirectCEntryStub.
   2784   // csp[8]: Argument 9, the current isolate address.
   2785 
   2786   __ Mov(x10, ExternalReference::isolate_address(isolate()));
   2787   __ Poke(x10, kPointerSize);
   2788 
   2789   Register length = w11;
   2790   Register previous_index_in_bytes = w12;
   2791   Register start = x13;
   2792 
   2793   // Load start of the subject string.
   2794   __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
    2795   // Load the length of the original subject string from the previous
    2796   // stack frame. We have to use fp, which points exactly two pointer
    2797   // sizes below the previous sp. (Creating a new stack frame pushes the
    2798   // previous fp onto the stack and decrements sp by 2 * kPointerSize.)
   2799   __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   2800   __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
   2801 
    2802   // Handle UC16 encoding: two bytes make one character.
   2803   //   string_encoding: if ASCII: 0x04
   2804   //                    if UC16:  0x00
   2805   STATIC_ASSERT(kStringEncodingMask == 0x04);
   2806   __ Ubfx(string_encoding, string_encoding, 2, 1);
   2807   __ Eor(string_encoding, string_encoding, 1);
   2808   //   string_encoding: if ASCII: 0
   2809   //                    if UC16:  1
   2810 
   2811   // Convert string positions from characters to bytes.
   2812   // Previous index is in x1.
   2813   __ Lsl(previous_index_in_bytes, w1, string_encoding);
   2814   __ Lsl(length, length, string_encoding);
   2815   __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
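             // Shifting left by string_encoding multiplies by the character
             // size: << 0 for ASCII (one byte per character) and << 1 for UC16
             // (two bytes per character).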
   2816 
   2817   // Argument 1 (x0): Subject string.
   2818   __ Mov(x0, subject);
   2819 
   2820   // Argument 2 (x1): Previous index, already there.
   2821 
   2822   // Argument 3 (x2): Get the start of input.
   2823   // Start of input = start of string + previous index + substring offset
   2824   //                                                     (0 if the string
   2825   //                                                      is not sliced).
   2826   __ Add(w10, previous_index_in_bytes, sliced_string_offset);
   2827   __ Add(x2, start, Operand(w10, UXTW));
   2828 
   2829   // Argument 4 (x3):
   2830   // End of input = start of input + (length of input - previous index)
   2831   __ Sub(w10, length, previous_index_in_bytes);
   2832   __ Add(x3, x2, Operand(w10, UXTW));
   2833 
   2834   // Argument 5 (x4): static offsets vector buffer.
   2835   __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
   2836 
   2837   // Argument 6 (x5): Set the number of capture registers to zero to force
   2838   // global regexps to behave as non-global. This stub is not used for global
   2839   // regexps.
   2840   __ Mov(x5, 0);
   2841 
   2842   // Argument 7 (x6): Start (high end) of backtracking stack memory area.
   2843   __ Mov(x10, address_of_regexp_stack_memory_address);
   2844   __ Ldr(x10, MemOperand(x10));
   2845   __ Mov(x11, address_of_regexp_stack_memory_size);
   2846   __ Ldr(x11, MemOperand(x11));
   2847   __ Add(x6, x10, x11);
   2848 
   2849   // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
   2850   __ Mov(x7, 1);
   2851 
   2852   // Locate the code entry and call it.
   2853   __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
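             // code_object holds a tagged pointer to the Code heap object;
             // adding Code::kHeaderSize - kHeapObjectTag yields the address of
             // the first instruction, which DirectCEntryStub can call directly.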
   2854   DirectCEntryStub stub(isolate());
   2855   stub.GenerateCall(masm, code_object);
   2856 
   2857   __ LeaveExitFrame(false, x10, true);
   2858 
   2859   // The generated regexp code returns an int32 in w0.
   2860   Label failure, exception;
   2861   __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
   2862   __ CompareAndBranch(w0,
   2863                       NativeRegExpMacroAssembler::EXCEPTION,
   2864                       eq,
   2865                       &exception);
   2866   __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
   2867 
   2868   // Success: process the result from the native regexp code.
   2869   Register number_of_capture_registers = x12;
   2870 
   2871   // Calculate number of capture registers (number_of_captures + 1) * 2
   2872   // and store it in the last match info.
   2873   __ Ldrsw(x10,
   2874            UntagSmiFieldMemOperand(regexp_data,
   2875                                    JSRegExp::kIrregexpCaptureCountOffset));
   2876   __ Add(x10, x10, x10);
   2877   __ Add(number_of_capture_registers, x10, 2);
   2878 
    2879   // Check that the fourth argument (last_match_info) is a JSArray object.
   2880   ASSERT(jssp.Is(__ StackPointer()));
   2881   __ Peek(x10, kLastMatchInfoOffset);
   2882   __ JumpIfSmi(x10, &runtime);
   2883   __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
   2884 
   2885   // Check that the JSArray is the fast case.
   2886   __ Ldr(last_match_info_elements,
   2887          FieldMemOperand(x10, JSArray::kElementsOffset));
   2888   __ Ldr(x10,
   2889          FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   2890   __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
   2891 
   2892   // Check that the last match info has space for the capture registers and the
   2893   // additional information (overhead).
   2894   //     (number_of_captures + 1) * 2 + overhead <= last match info size
   2895   //     (number_of_captures * 2) + 2 + overhead <= last match info size
   2896   //      number_of_capture_registers + overhead <= last match info size
   2897   __ Ldrsw(x10,
   2898            UntagSmiFieldMemOperand(last_match_info_elements,
   2899                                    FixedArray::kLengthOffset));
   2900   __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
   2901   __ Cmp(x11, x10);
   2902   __ B(gt, &runtime);
   2903 
   2904   // Store the capture count.
   2905   __ SmiTag(x10, number_of_capture_registers);
   2906   __ Str(x10,
   2907          FieldMemOperand(last_match_info_elements,
   2908                          RegExpImpl::kLastCaptureCountOffset));
   2909   // Store last subject and last input.
   2910   __ Str(subject,
   2911          FieldMemOperand(last_match_info_elements,
   2912                          RegExpImpl::kLastSubjectOffset));
   2913   // Use x10 as the subject string in order to only need
   2914   // one RecordWriteStub.
   2915   __ Mov(x10, subject);
   2916   __ RecordWriteField(last_match_info_elements,
   2917                       RegExpImpl::kLastSubjectOffset,
   2918                       x10,
   2919                       x11,
   2920                       kLRHasNotBeenSaved,
   2921                       kDontSaveFPRegs);
   2922   __ Str(subject,
   2923          FieldMemOperand(last_match_info_elements,
   2924                          RegExpImpl::kLastInputOffset));
   2925   __ Mov(x10, subject);
   2926   __ RecordWriteField(last_match_info_elements,
   2927                       RegExpImpl::kLastInputOffset,
   2928                       x10,
   2929                       x11,
   2930                       kLRHasNotBeenSaved,
   2931                       kDontSaveFPRegs);
   2932 
   2933   Register last_match_offsets = x13;
   2934   Register offsets_vector_index = x14;
   2935   Register current_offset = x15;
   2936 
   2937   // Get the static offsets vector filled by the native regexp code
   2938   // and fill the last match info.
   2939   ExternalReference address_of_static_offsets_vector =
   2940       ExternalReference::address_of_static_offsets_vector(isolate());
   2941   __ Mov(offsets_vector_index, address_of_static_offsets_vector);
   2942 
   2943   Label next_capture, done;
   2944   // Capture register counter starts from number of capture registers and
   2945   // iterates down to zero (inclusive).
   2946   __ Add(last_match_offsets,
   2947          last_match_info_elements,
   2948          RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
   2949   __ Bind(&next_capture);
   2950   __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
   2951   __ B(mi, &done);
    2952   // Read two 32-bit values from the static offsets vector buffer into
    2953   // an X register.
   2954   __ Ldr(current_offset,
   2955          MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
   2956   // Store the smi values in the last match info.
   2957   __ SmiTag(x10, current_offset);
    2958   // Clearing the bottom 32 bits gives us a Smi.
   2959   STATIC_ASSERT(kSmiShift == 32);
   2960   __ And(x11, current_offset, ~kWRegMask);
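             // With kSmiShift == 32, a smi is just the value shifted into the
             // upper word. The second 32-bit offset already occupies the upper
             // word of current_offset, so masking off the low word (above)
             // turns it into a smi without any extra shift.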
   2961   __ Stp(x10,
   2962          x11,
   2963          MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
   2964   __ B(&next_capture);
   2965   __ Bind(&done);
   2966 
   2967   // Return last match info.
   2968   __ Peek(x0, kLastMatchInfoOffset);
   2969   __ PopCPURegList(used_callee_saved_registers);
   2970   // Drop the 4 arguments of the stub from the stack.
   2971   __ Drop(4);
   2972   __ Ret();
   2973 
   2974   __ Bind(&exception);
   2975   Register exception_value = x0;
    2976   // A stack overflow (on the backtrack stack) may have occurred
   2977   // in the RegExp code but no exception has been created yet.
   2978   // If there is no pending exception, handle that in the runtime system.
   2979   __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
   2980   __ Mov(x11,
   2981          Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   2982                                    isolate())));
   2983   __ Ldr(exception_value, MemOperand(x11));
   2984   __ Cmp(x10, exception_value);
   2985   __ B(eq, &runtime);
   2986 
   2987   __ Str(x10, MemOperand(x11));  // Clear pending exception.
   2988 
   2989   // Check if the exception is a termination. If so, throw as uncatchable.
   2990   Label termination_exception;
   2991   __ JumpIfRoot(exception_value,
   2992                 Heap::kTerminationExceptionRootIndex,
   2993                 &termination_exception);
   2994 
   2995   __ Throw(exception_value, x10, x11, x12, x13);
   2996 
   2997   __ Bind(&termination_exception);
   2998   __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
   2999 
   3000   __ Bind(&failure);
   3001   __ Mov(x0, Operand(isolate()->factory()->null_value()));
   3002   __ PopCPURegList(used_callee_saved_registers);
   3003   // Drop the 4 arguments of the stub from the stack.
   3004   __ Drop(4);
   3005   __ Ret();
   3006 
   3007   __ Bind(&runtime);
   3008   __ PopCPURegList(used_callee_saved_registers);
   3009   __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
   3010 
   3011   // Deferred code for string handling.
   3012   // (6) Not a long external string?  If yes, go to (8).
   3013   __ Bind(&not_seq_nor_cons);
   3014   // Compare flags are still set.
   3015   __ B(ne, &not_long_external);  // Go to (8).
   3016 
   3017   // (7) External string. Make it, offset-wise, look like a sequential string.
   3018   __ Bind(&external_string);
   3019   if (masm->emit_debug_code()) {
   3020     // Assert that we do not have a cons or slice (indirect strings) here.
   3021     // Sequential strings have already been ruled out.
   3022     __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
   3023     __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   3024     __ Tst(x10, kIsIndirectStringMask);
   3025     __ Check(eq, kExternalStringExpectedButNotFound);
   3026     __ And(x10, x10, kStringRepresentationMask);
   3027     __ Cmp(x10, 0);
   3028     __ Check(ne, kExternalStringExpectedButNotFound);
   3029   }
   3030   __ Ldr(subject,
   3031          FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   3032   // Move the pointer so that offset-wise, it looks like a sequential string.
   3033   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   3034   __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   3035   __ B(&seq_string);    // Go to (5).
   3036 
   3037   // (8) If this is a short external string or not a string, bail out to
   3038   // runtime.
   3039   __ Bind(&not_long_external);
   3040   STATIC_ASSERT(kShortExternalStringTag != 0);
   3041   __ TestAndBranchIfAnySet(string_representation,
   3042                            kShortExternalStringMask | kIsNotStringMask,
   3043                            &runtime);
   3044 
   3045   // (9) Sliced string. Replace subject with parent.
   3046   __ Ldr(sliced_string_offset,
   3047          UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
   3048   __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   3049   __ B(&check_underlying);    // Go to (4).
   3050 #endif
   3051 }
   3052 
   3053 
   3054 static void GenerateRecordCallTarget(MacroAssembler* masm,
   3055                                      Register argc,
   3056                                      Register function,
   3057                                      Register feedback_vector,
   3058                                      Register index,
   3059                                      Register scratch1,
   3060                                      Register scratch2) {
   3061   ASM_LOCATION("GenerateRecordCallTarget");
   3062   ASSERT(!AreAliased(scratch1, scratch2,
   3063                      argc, function, feedback_vector, index));
   3064   // Cache the called function in a feedback vector slot. Cache states are
   3065   // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
   3066   //  argc :            number of arguments to the construct function
   3067   //  function :        the function to call
   3068   //  feedback_vector : the feedback vector
   3069   //  index :           slot in feedback vector (smi)
   3070   Label initialize, done, miss, megamorphic, not_array_function;
   3071 
   3072   ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
   3073             masm->isolate()->heap()->megamorphic_symbol());
   3074   ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
   3075             masm->isolate()->heap()->uninitialized_symbol());
   3076 
   3077   // Load the cache state.
   3078   __ Add(scratch1, feedback_vector,
   3079          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   3080   __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
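             // Operand::UntagSmiAndScale converts the smi-tagged slot index
             // into a byte offset (index * kPointerSize); the FieldMemOperand
             // then skips the FixedArray header, so scratch1 now holds the
             // cache state for this slot.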
   3081 
   3082   // A monomorphic cache hit or an already megamorphic state: invoke the
   3083   // function without changing the state.
   3084   __ Cmp(scratch1, function);
   3085   __ B(eq, &done);
   3086 
   3087   if (!FLAG_pretenuring_call_new) {
   3088     // If we came here, we need to see if we are the array function.
    3089   // If we didn't have a matching function, and we didn't find the
    3090   // megamorphic sentinel, then the slot holds either some other function
    3091   // or an AllocationSite. Do a map check on the object in scratch1.
   3092     __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
   3093     __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
   3094 
   3095     // Make sure the function is the Array() function
   3096     __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
   3097     __ Cmp(function, scratch1);
   3098     __ B(ne, &megamorphic);
   3099     __ B(&done);
   3100   }
   3101 
   3102   __ Bind(&miss);
   3103 
    3104   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
   3105   // megamorphic.
   3106   __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
   3107   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   3108   // write-barrier is needed.
   3109   __ Bind(&megamorphic);
   3110   __ Add(scratch1, feedback_vector,
   3111          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   3112   __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
   3113   __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
   3114   __ B(&done);
   3115 
    3116   // An uninitialized cache is patched with the function, or with a sentinel
    3117   // indicating the ElementsKind if the function is the Array constructor.
   3118   __ Bind(&initialize);
   3119 
   3120   if (!FLAG_pretenuring_call_new) {
   3121     // Make sure the function is the Array() function
   3122     __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
   3123     __ Cmp(function, scratch1);
   3124     __ B(ne, &not_array_function);
   3125 
    3126     // The target function is the Array constructor.
    3127     // Create an AllocationSite if we don't already have one, and store
    3128     // it in the slot.
   3129     {
   3130       FrameScope scope(masm, StackFrame::INTERNAL);
   3131       CreateAllocationSiteStub create_stub(masm->isolate());
   3132 
   3133       // Arguments register must be smi-tagged to call out.
   3134       __ SmiTag(argc);
   3135       __ Push(argc, function, feedback_vector, index);
   3136 
    3137       // CreateAllocationSiteStub expects the feedback vector in x2 and
    3138       // the slot index in x3.
   3139       ASSERT(feedback_vector.Is(x2) && index.Is(x3));
   3140       __ CallStub(&create_stub);
   3141 
   3142       __ Pop(index, feedback_vector, function, argc);
   3143       __ SmiUntag(argc);
   3144     }
   3145     __ B(&done);
   3146 
   3147     __ Bind(&not_array_function);
   3148   }
   3149 
   3150   // An uninitialized cache is patched with the function.
   3151 
   3152   __ Add(scratch1, feedback_vector,
   3153          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   3154   __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
   3155   __ Str(function, MemOperand(scratch1, 0));
   3156 
   3157   __ Push(function);
   3158   __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
   3159                  kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   3160   __ Pop(function);
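             // The function register is saved and restored around RecordWrite
             // above because the write barrier may clobber its value register.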
   3161 
   3162   __ Bind(&done);
   3163 }
   3164 
   3165 
   3166 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
   3167   // Do not transform the receiver for strict mode functions.
   3168   __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   3169   __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
   3170   __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
   3171 
    3172   // Do not transform the receiver for natives (compiler hints already in w4).
   3173   __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
   3174 }
   3175 
   3176 
   3177 static void EmitSlowCase(MacroAssembler* masm,
   3178                          int argc,
   3179                          Register function,
   3180                          Register type,
   3181                          Label* non_function) {
   3182   // Check for function proxy.
   3183   // x10 : function type.
   3184   __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
   3185   __ Push(function);  // put proxy as additional argument
   3186   __ Mov(x0, argc + 1);
   3187   __ Mov(x2, 0);
   3188   __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
   3189   {
   3190     Handle<Code> adaptor =
   3191         masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
   3192     __ Jump(adaptor, RelocInfo::CODE_TARGET);
   3193   }
   3194 
   3195   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   3196   // of the original receiver from the call site).
   3197   __ Bind(non_function);
   3198   __ Poke(function, argc * kXRegSize);
   3199   __ Mov(x0, argc);  // Set up the number of arguments.
   3200   __ Mov(x2, 0);
   3201   __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
   3202   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
   3203           RelocInfo::CODE_TARGET);
   3204 }
   3205 
   3206 
   3207 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
   3208   // Wrap the receiver and patch it back onto the stack.
   3209   { FrameScope frame_scope(masm, StackFrame::INTERNAL);
   3210     __ Push(x1, x3);
   3211     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   3212     __ Pop(x1);
   3213   }
   3214   __ Poke(x0, argc * kPointerSize);
   3215   __ B(cont);
   3216 }
   3217 
   3218 
   3219 static void CallFunctionNoFeedback(MacroAssembler* masm,
   3220                                    int argc, bool needs_checks,
   3221                                    bool call_as_method) {
   3222   // x1  function    the function to call
   3223   Register function = x1;
   3224   Register type = x4;
   3225   Label slow, non_function, wrap, cont;
   3226 
   3227   // TODO(jbramley): This function has a lot of unnamed registers. Name them,
   3228   // and tidy things up a bit.
   3229 
   3230   if (needs_checks) {
   3231     // Check that the function is really a JavaScript function.
   3232     __ JumpIfSmi(function, &non_function);
   3233 
   3234     // Goto slow case if we do not have a function.
   3235     __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
   3236   }
   3237 
   3238   // Fast-case: Invoke the function now.
   3239   // x1  function  pushed function
   3240   ParameterCount actual(argc);
   3241 
   3242   if (call_as_method) {
   3243     if (needs_checks) {
   3244       EmitContinueIfStrictOrNative(masm, &cont);
   3245     }
   3246 
   3247     // Compute the receiver in sloppy mode.
   3248     __ Peek(x3, argc * kPointerSize);
   3249 
   3250     if (needs_checks) {
   3251       __ JumpIfSmi(x3, &wrap);
   3252       __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
   3253     } else {
   3254       __ B(&wrap);
   3255     }
   3256 
   3257     __ Bind(&cont);
   3258   }
   3259 
   3260   __ InvokeFunction(function,
   3261                     actual,
   3262                     JUMP_FUNCTION,
   3263                     NullCallWrapper());
   3264   if (needs_checks) {
   3265     // Slow-case: Non-function called.
   3266     __ Bind(&slow);
   3267     EmitSlowCase(masm, argc, function, type, &non_function);
   3268   }
   3269 
   3270   if (call_as_method) {
   3271     __ Bind(&wrap);
   3272     EmitWrapCase(masm, argc, &cont);
   3273   }
   3274 }
   3275 
   3276 
   3277 void CallFunctionStub::Generate(MacroAssembler* masm) {
   3278   ASM_LOCATION("CallFunctionStub::Generate");
   3279   CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
   3280 }
   3281 
   3282 
   3283 void CallConstructStub::Generate(MacroAssembler* masm) {
   3284   ASM_LOCATION("CallConstructStub::Generate");
   3285   // x0 : number of arguments
   3286   // x1 : the function to call
   3287   // x2 : feedback vector
    3288   // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
   3289   Register function = x1;
   3290   Label slow, non_function_call;
   3291 
   3292   // Check that the function is not a smi.
   3293   __ JumpIfSmi(function, &non_function_call);
   3294   // Check that the function is a JSFunction.
   3295   Register object_type = x10;
   3296   __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
   3297                          &slow);
   3298 
   3299   if (RecordCallTarget()) {
   3300     GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
   3301 
   3302     __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
   3303     if (FLAG_pretenuring_call_new) {
   3304       // Put the AllocationSite from the feedback vector into x2.
    3305       // Adding kPointerSize encodes our knowledge that the AllocationSite
    3306       // entry is at the feedback vector slot given by x3 + 1.
   3307       __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
   3308     } else {
    3309       Label feedback_register_initialized;
   3310       // Put the AllocationSite from the feedback vector into x2, or undefined.
   3311       __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
   3312       __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
   3313       __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
   3314                     &feedback_register_initialized);
   3315       __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
   3316       __ bind(&feedback_register_initialized);
   3317     }
   3318 
   3319     __ AssertUndefinedOrAllocationSite(x2, x5);
   3320   }
   3321 
   3322   // Jump to the function-specific construct stub.
   3323   Register jump_reg = x4;
   3324   Register shared_func_info = jump_reg;
   3325   Register cons_stub = jump_reg;
   3326   Register cons_stub_code = jump_reg;
   3327   __ Ldr(shared_func_info,
   3328          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3329   __ Ldr(cons_stub,
   3330          FieldMemOperand(shared_func_info,
   3331                          SharedFunctionInfo::kConstructStubOffset));
   3332   __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
   3333   __ Br(cons_stub_code);
   3334 
   3335   Label do_call;
   3336   __ Bind(&slow);
   3337   __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
   3338   __ B(ne, &non_function_call);
   3339   __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
   3340   __ B(&do_call);
   3341 
   3342   __ Bind(&non_function_call);
   3343   __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   3344 
   3345   __ Bind(&do_call);
   3346   // Set expected number of arguments to zero (not changing x0).
   3347   __ Mov(x2, 0);
   3348   __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
   3349           RelocInfo::CODE_TARGET);
   3350 }
   3351 
   3352 
   3353 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
   3354   __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3355   __ Ldr(vector, FieldMemOperand(vector,
   3356                                  JSFunction::kSharedFunctionInfoOffset));
   3357   __ Ldr(vector, FieldMemOperand(vector,
   3358                                  SharedFunctionInfo::kFeedbackVectorOffset));
   3359 }
   3360 
   3361 
   3362 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
   3363   // x1 - function
   3364   // x3 - slot id
   3365   Label miss;
   3366   Register function = x1;
   3367   Register feedback_vector = x2;
   3368   Register index = x3;
   3369   Register scratch = x4;
   3370 
   3371   EmitLoadTypeFeedbackVector(masm, feedback_vector);
   3372 
   3373   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
   3374   __ Cmp(function, scratch);
   3375   __ B(ne, &miss);
   3376 
   3377   __ Mov(x0, Operand(arg_count()));
   3378 
   3379   __ Add(scratch, feedback_vector,
   3380          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   3381   __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   3382 
   3383   // Verify that scratch contains an AllocationSite
   3384   Register map = x5;
   3385   __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
   3386   __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
   3387 
   3388   Register allocation_site = feedback_vector;
   3389   __ Mov(allocation_site, scratch);
   3390   ArrayConstructorStub stub(masm->isolate(), arg_count());
   3391   __ TailCallStub(&stub);
   3392 
   3393   __ bind(&miss);
   3394   GenerateMiss(masm, IC::kCallIC_Customization_Miss);
   3395 
    3396   // The slow case: we need this no matter what to complete a call after a miss.
   3397   CallFunctionNoFeedback(masm,
   3398                          arg_count(),
   3399                          true,
   3400                          CallAsMethod());
   3401 
   3402   __ Unreachable();
   3403 }
   3404 
   3405 
   3406 void CallICStub::Generate(MacroAssembler* masm) {
   3407   ASM_LOCATION("CallICStub");
   3408 
   3409   // x1 - function
   3410   // x3 - slot id (Smi)
   3411   Label extra_checks_or_miss, slow_start;
   3412   Label slow, non_function, wrap, cont;
   3413   Label have_js_function;
   3414   int argc = state_.arg_count();
   3415   ParameterCount actual(argc);
   3416 
   3417   Register function = x1;
   3418   Register feedback_vector = x2;
   3419   Register index = x3;
   3420   Register type = x4;
   3421 
   3422   EmitLoadTypeFeedbackVector(masm, feedback_vector);
   3423 
   3424   // The checks. First, does x1 match the recorded monomorphic target?
   3425   __ Add(x4, feedback_vector,
   3426          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   3427   __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
   3428 
   3429   __ Cmp(x4, function);
   3430   __ B(ne, &extra_checks_or_miss);
   3431 
   3432   __ bind(&have_js_function);
   3433   if (state_.CallAsMethod()) {
   3434     EmitContinueIfStrictOrNative(masm, &cont);
   3435 
   3436     // Compute the receiver in sloppy mode.
   3437     __ Peek(x3, argc * kPointerSize);
   3438 
   3439     __ JumpIfSmi(x3, &wrap);
   3440     __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
   3441 
   3442     __ Bind(&cont);
   3443   }
   3444 
   3445   __ InvokeFunction(function,
   3446                     actual,
   3447                     JUMP_FUNCTION,
   3448                     NullCallWrapper());
   3449 
   3450   __ bind(&slow);
   3451   EmitSlowCase(masm, argc, function, type, &non_function);
   3452 
   3453   if (state_.CallAsMethod()) {
   3454     __ bind(&wrap);
   3455     EmitWrapCase(masm, argc, &cont);
   3456   }
   3457 
   3458   __ bind(&extra_checks_or_miss);
   3459   Label miss;
   3460 
   3461   __ JumpIfRoot(x4, Heap::kMegamorphicSymbolRootIndex, &slow_start);
   3462   __ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss);
   3463 
   3464   if (!FLAG_trace_ic) {
   3465     // We are going megamorphic. If the feedback is a JSFunction, it is fine
   3466     // to handle it here. More complex cases are dealt with in the runtime.
   3467     __ AssertNotSmi(x4);
   3468     __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
   3469     __ Add(x4, feedback_vector,
   3470            Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   3471     __ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex);
   3472     __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
   3473     __ B(&slow_start);
   3474   }
   3475 
   3476   // We are here because tracing is on or we are going monomorphic.
   3477   __ bind(&miss);
   3478   GenerateMiss(masm, IC::kCallIC_Miss);
   3479 
    3480   // The slow case.
   3481   __ bind(&slow_start);
   3482 
   3483   // Check that the function is really a JavaScript function.
   3484   __ JumpIfSmi(function, &non_function);
   3485 
   3486   // Goto slow case if we do not have a function.
   3487   __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
   3488   __ B(&have_js_function);
   3489 }
   3490 
   3491 
   3492 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
   3493   ASM_LOCATION("CallICStub[Miss]");
   3494 
    3495   // Get the receiver of the function from the stack.
   3496   __ Peek(x4, (state_.arg_count() + 1) * kPointerSize);
   3497 
   3498   {
   3499     FrameScope scope(masm, StackFrame::INTERNAL);
   3500 
   3501     // Push the receiver and the function and feedback info.
   3502     __ Push(x4, x1, x2, x3);
   3503 
   3504     // Call the entry.
   3505     ExternalReference miss = ExternalReference(IC_Utility(id),
   3506                                                masm->isolate());
   3507     __ CallExternalReference(miss, 4);
   3508 
    3509     // Move the result to x1 and exit the internal frame.
   3510     __ Mov(x1, x0);
   3511   }
   3512 }
   3513 
   3514 
   3515 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
    3516   // If the receiver is a smi, trigger the non-string case.
   3517   __ JumpIfSmi(object_, receiver_not_string_);
   3518 
   3519   // Fetch the instance type of the receiver into result register.
   3520   __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   3521   __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   3522 
    3523   // If the receiver is not a string, trigger the non-string case.
   3524   __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
   3525 
    3526   // If the index is not a smi, trigger the non-smi case.
   3527   __ JumpIfNotSmi(index_, &index_not_smi_);
   3528 
   3529   __ Bind(&got_smi_index_);
   3530   // Check for index out of range.
   3531   __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
   3532   __ Cmp(result_, Operand::UntagSmi(index_));
   3533   __ B(ls, index_out_of_range_);
   3534 
   3535   __ SmiUntag(index_);
   3536 
   3537   StringCharLoadGenerator::Generate(masm,
   3538                                     object_,
   3539                                     index_.W(),
   3540                                     result_,
   3541                                     &call_runtime_);
   3542   __ SmiTag(result_);
   3543   __ Bind(&exit_);
   3544 }
   3545 
   3546 
   3547 void StringCharCodeAtGenerator::GenerateSlow(
   3548     MacroAssembler* masm,
   3549     const RuntimeCallHelper& call_helper) {
   3550   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   3551 
   3552   __ Bind(&index_not_smi_);
   3553   // If index is a heap number, try converting it to an integer.
   3554   __ CheckMap(index_,
   3555               result_,
   3556               Heap::kHeapNumberMapRootIndex,
   3557               index_not_number_,
   3558               DONT_DO_SMI_CHECK);
   3559   call_helper.BeforeCall(masm);
   3560   // Save object_ on the stack and pass index_ as argument for runtime call.
   3561   __ Push(object_, index_);
   3562   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
   3563     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   3564   } else {
   3565     ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
   3566     // NumberToSmi discards numbers that are not exact integers.
   3567     __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
   3568   }
   3569   // Save the conversion result before the pop instructions below
   3570   // have a chance to overwrite it.
   3571   __ Mov(index_, x0);
   3572   __ Pop(object_);
   3573   // Reload the instance type.
   3574   __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   3575   __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   3576   call_helper.AfterCall(masm);
   3577 
   3578   // If index is still not a smi, it must be out of range.
   3579   __ JumpIfNotSmi(index_, index_out_of_range_);
   3580   // Otherwise, return to the fast path.
   3581   __ B(&got_smi_index_);
   3582 
   3583   // Call runtime. We get here when the receiver is a string and the
    3584   // index is a number, but the code for getting the actual character
   3585   // is too complex (e.g., when the string needs to be flattened).
   3586   __ Bind(&call_runtime_);
   3587   call_helper.BeforeCall(masm);
   3588   __ SmiTag(index_);
   3589   __ Push(object_, index_);
   3590   __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
   3591   __ Mov(result_, x0);
   3592   call_helper.AfterCall(masm);
   3593   __ B(&exit_);
   3594 
   3595   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   3596 }
   3597 
   3598 
   3599 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   3600   __ JumpIfNotSmi(code_, &slow_case_);
   3601   __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
   3602   __ B(hi, &slow_case_);
   3603 
   3604   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
    3605   // At this point the code register contains the smi-tagged ASCII char code.
   3606   STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
   3607   __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
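             // Shifting the smi-tagged code right by
             // (kSmiShift - kPointerSizeLog2) untags it and scales it by the
             // pointer size in a single operand, giving the byte offset of the
             // cache entry (the STATIC_ASSERT above keeps the shift positive).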
   3608   __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   3609   __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
   3610   __ Bind(&exit_);
   3611 }
   3612 
   3613 
   3614 void StringCharFromCodeGenerator::GenerateSlow(
   3615     MacroAssembler* masm,
   3616     const RuntimeCallHelper& call_helper) {
   3617   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
   3618 
   3619   __ Bind(&slow_case_);
   3620   call_helper.BeforeCall(masm);
   3621   __ Push(code_);
   3622   __ CallRuntime(Runtime::kCharFromCode, 1);
   3623   __ Mov(result_, x0);
   3624   call_helper.AfterCall(masm);
   3625   __ B(&exit_);
   3626 
   3627   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
   3628 }
   3629 
   3630 
   3631 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   3632   // Inputs are in x0 (lhs) and x1 (rhs).
   3633   ASSERT(state_ == CompareIC::SMI);
   3634   ASM_LOCATION("ICCompareStub[Smis]");
   3635   Label miss;
   3636   // Bail out (to 'miss') unless both x0 and x1 are smis.
   3637   __ JumpIfEitherNotSmi(x0, x1, &miss);
   3638 
   3639   if (GetCondition() == eq) {
   3640     // For equality we do not care about the sign of the result.
   3641     __ Sub(x0, x0, x1);
   3642   } else {
   3643     // Untag before subtracting to avoid handling overflow.
   3644     __ SmiUntag(x1);
   3645     __ Sub(x0, x1, Operand::UntagSmi(x0));
   3646   }
   3647   __ Ret();
   3648 
   3649   __ Bind(&miss);
   3650   GenerateMiss(masm);
   3651 }
   3652 
   3653 
   3654 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   3655   ASSERT(state_ == CompareIC::NUMBER);
   3656   ASM_LOCATION("ICCompareStub[HeapNumbers]");
   3657 
   3658   Label unordered, maybe_undefined1, maybe_undefined2;
   3659   Label miss, handle_lhs, values_in_d_regs;
   3660   Label untag_rhs, untag_lhs;
   3661 
   3662   Register result = x0;
   3663   Register rhs = x0;
   3664   Register lhs = x1;
   3665   FPRegister rhs_d = d0;
   3666   FPRegister lhs_d = d1;
   3667 
   3668   if (left_ == CompareIC::SMI) {
   3669     __ JumpIfNotSmi(lhs, &miss);
   3670   }
   3671   if (right_ == CompareIC::SMI) {
   3672     __ JumpIfNotSmi(rhs, &miss);
   3673   }
   3674 
   3675   __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
   3676   __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
   3677 
   3678   // Load rhs if it's a heap number.
   3679   __ JumpIfSmi(rhs, &handle_lhs);
   3680   __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
   3681               DONT_DO_SMI_CHECK);
   3682   __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
   3683 
   3684   // Load lhs if it's a heap number.
   3685   __ Bind(&handle_lhs);
   3686   __ JumpIfSmi(lhs, &values_in_d_regs);
   3687   __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
   3688               DONT_DO_SMI_CHECK);
   3689   __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
   3690 
   3691   __ Bind(&values_in_d_regs);
   3692   __ Fcmp(lhs_d, rhs_d);
   3693   __ B(vs, &unordered);  // Overflow flag set if either is NaN.
   3694   STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
   3695   __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
   3696   __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
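             // The Cset/Csinv pair materializes the three-way comparison
             // result: gt -> 1, eq -> 0, and for lt the Csinv writes
             // ~xzr == -1 (LESS).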
   3697   __ Ret();
   3698 
   3699   __ Bind(&unordered);
   3700   ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
   3701                      CompareIC::GENERIC);
   3702   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   3703 
   3704   __ Bind(&maybe_undefined1);
   3705   if (Token::IsOrderedRelationalCompareOp(op_)) {
   3706     __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
   3707     __ JumpIfSmi(lhs, &unordered);
   3708     __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
   3709     __ B(&unordered);
   3710   }
   3711 
   3712   __ Bind(&maybe_undefined2);
   3713   if (Token::IsOrderedRelationalCompareOp(op_)) {
   3714     __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
   3715   }
   3716 
   3717   __ Bind(&miss);
   3718   GenerateMiss(masm);
   3719 }
   3720 
   3721 
   3722 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   3723   ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
   3724   ASM_LOCATION("ICCompareStub[InternalizedStrings]");
   3725   Label miss;
   3726 
   3727   Register result = x0;
   3728   Register rhs = x0;
   3729   Register lhs = x1;
   3730 
   3731   // Check that both operands are heap objects.
   3732   __ JumpIfEitherSmi(lhs, rhs, &miss);
   3733 
   3734   // Check that both operands are internalized strings.
   3735   Register rhs_map = x10;
   3736   Register lhs_map = x11;
   3737   Register rhs_type = x10;
   3738   Register lhs_type = x11;
   3739   __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
   3740   __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
   3741   __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
   3742   __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
   3743 
   3744   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   3745   __ Orr(x12, lhs_type, rhs_type);
   3746   __ TestAndBranchIfAnySet(
   3747       x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
   3748 
   3749   // Internalized strings are compared by identity.
   3750   STATIC_ASSERT(EQUAL == 0);
   3751   __ Cmp(lhs, rhs);
   3752   __ Cset(result, ne);
   3753   __ Ret();
   3754 
   3755   __ Bind(&miss);
   3756   GenerateMiss(masm);
   3757 }
   3758 
   3759 
   3760 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
   3761   ASSERT(state_ == CompareIC::UNIQUE_NAME);
   3762   ASM_LOCATION("ICCompareStub[UniqueNames]");
   3763   ASSERT(GetCondition() == eq);
   3764   Label miss;
   3765 
   3766   Register result = x0;
   3767   Register rhs = x0;
   3768   Register lhs = x1;
   3769 
   3770   Register lhs_instance_type = w2;
   3771   Register rhs_instance_type = w3;
   3772 
   3773   // Check that both operands are heap objects.
   3774   __ JumpIfEitherSmi(lhs, rhs, &miss);
   3775 
   3776   // Check that both operands are unique names. This leaves the instance
    3777   // types loaded in lhs_instance_type and rhs_instance_type.
   3778   __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
   3779   __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
   3780   __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
   3781   __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
   3782 
   3783   // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
   3784   // should have kInternalizedTag set.
   3785   __ JumpIfNotUniqueName(lhs_instance_type, &miss);
   3786   __ JumpIfNotUniqueName(rhs_instance_type, &miss);
   3787 
   3788   // Unique names are compared by identity.
   3789   STATIC_ASSERT(EQUAL == 0);
   3790   __ Cmp(lhs, rhs);
   3791   __ Cset(result, ne);
   3792   __ Ret();
   3793 
   3794   __ Bind(&miss);
   3795   GenerateMiss(masm);
   3796 }
   3797 
   3798 
   3799 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
   3800   ASSERT(state_ == CompareIC::STRING);
   3801   ASM_LOCATION("ICCompareStub[Strings]");
   3802 
   3803   Label miss;
   3804 
   3805   bool equality = Token::IsEqualityOp(op_);
   3806 
   3807   Register result = x0;
   3808   Register rhs = x0;
   3809   Register lhs = x1;
   3810 
   3811   // Check that both operands are heap objects.
   3812   __ JumpIfEitherSmi(rhs, lhs, &miss);
   3813 
   3814   // Check that both operands are strings.
   3815   Register rhs_map = x10;
   3816   Register lhs_map = x11;
   3817   Register rhs_type = x10;
   3818   Register lhs_type = x11;
   3819   __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
   3820   __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
   3821   __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
   3822   __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
   3823   STATIC_ASSERT(kNotStringTag != 0);
   3824   __ Orr(x12, lhs_type, rhs_type);
   3825   __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
   3826 
   3827   // Fast check for identical strings.
   3828   Label not_equal;
   3829   __ Cmp(lhs, rhs);
   3830   __ B(ne, &not_equal);
   3831   __ Mov(result, EQUAL);
   3832   __ Ret();
   3833 
   3834   __ Bind(&not_equal);
    3835   // Handle the case of non-identical strings.
   3836 
   3837   // Check that both strings are internalized strings. If they are, we're done
   3838   // because we already know they are not identical. We know they are both
   3839   // strings.
   3840   if (equality) {
   3841     ASSERT(GetCondition() == eq);
   3842     STATIC_ASSERT(kInternalizedTag == 0);
   3843     Label not_internalized_strings;
   3844     __ Orr(x12, lhs_type, rhs_type);
   3845     __ TestAndBranchIfAnySet(
   3846         x12, kIsNotInternalizedMask, &not_internalized_strings);
    3847     // Result is in rhs (x0), a heap pointer, which cannot equal the smi EQUAL.
   3848     __ Ret();
   3849     __ Bind(&not_internalized_strings);
   3850   }
   3851 
   3852   // Check that both strings are sequential ASCII.
   3853   Label runtime;
   3854   __ JumpIfBothInstanceTypesAreNotSequentialAscii(
   3855       lhs_type, rhs_type, x12, x13, &runtime);
   3856 
   3857   // Compare flat ASCII strings. Returns when done.
   3858   if (equality) {
   3859     StringCompareStub::GenerateFlatAsciiStringEquals(
   3860         masm, lhs, rhs, x10, x11, x12);
   3861   } else {
   3862     StringCompareStub::GenerateCompareFlatAsciiStrings(
   3863         masm, lhs, rhs, x10, x11, x12, x13);
   3864   }
   3865 
   3866   // Handle more complex cases in runtime.
   3867   __ Bind(&runtime);
   3868   __ Push(lhs, rhs);
   3869   if (equality) {
   3870     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   3871   } else {
   3872     __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
   3873   }
   3874 
   3875   __ Bind(&miss);
   3876   GenerateMiss(masm);
   3877 }
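
// A host-side sketch (illustration only, parameter names are hypothetical) of
// the fast-path ladder above: identical objects compare EQUAL immediately;
// for equality ops, two distinct internalized strings are known unequal; flat
// ASCII pairs are compared inline; anything else is left to the runtime.
enum StringComparePathSketch {
  kIdenticalStrings,
  kKnownNotEqual,
  kFlatAsciiCompare,
  kRuntimeFallback
};
static StringComparePathSketch ClassifyStringCompareSketch(
    bool same_object, bool is_equality_op, bool both_internalized,
    bool both_sequential_ascii) {
  if (same_object) return kIdenticalStrings;
  if (is_equality_op && both_internalized) return kKnownNotEqual;
  if (both_sequential_ascii) return kFlatAsciiCompare;
  return kRuntimeFallback;
}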
   3878 
   3879 
   3880 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   3881   ASSERT(state_ == CompareIC::OBJECT);
   3882   ASM_LOCATION("ICCompareStub[Objects]");
   3883 
   3884   Label miss;
   3885 
   3886   Register result = x0;
   3887   Register rhs = x0;
   3888   Register lhs = x1;
   3889 
   3890   __ JumpIfEitherSmi(rhs, lhs, &miss);
   3891 
   3892   __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
   3893   __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
   3894 
   3895   ASSERT(GetCondition() == eq);
   3896   __ Sub(result, rhs, lhs);
   3897   __ Ret();
   3898 
   3899   __ Bind(&miss);
   3900   GenerateMiss(masm);
   3901 }
   3902 
   3903 
   3904 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   3905   ASM_LOCATION("ICCompareStub[KnownObjects]");
   3906 
   3907   Label miss;
   3908 
   3909   Register result = x0;
   3910   Register rhs = x0;
   3911   Register lhs = x1;
   3912 
   3913   __ JumpIfEitherSmi(rhs, lhs, &miss);
   3914 
   3915   Register rhs_map = x10;
   3916   Register lhs_map = x11;
   3917   __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
   3918   __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
   3919   __ Cmp(rhs_map, Operand(known_map_));
   3920   __ B(ne, &miss);
   3921   __ Cmp(lhs_map, Operand(known_map_));
   3922   __ B(ne, &miss);
   3923 
   3924   __ Sub(result, rhs, lhs);
   3925   __ Ret();
   3926 
   3927   __ Bind(&miss);
   3928   GenerateMiss(masm);
   3929 }
   3930 
   3931 
    3932 // This method handles the case where a compare stub had the wrong
    3933 // implementation. It calls a miss handler, which rewrites the stub. All other
    3934 // ICCompareStub::Generate* methods should fall back into this one if their
    3935 // operands were not the expected types.
   3936 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   3937   ASM_LOCATION("ICCompareStub[Miss]");
   3938 
   3939   Register stub_entry = x11;
   3940   {
   3941     ExternalReference miss =
   3942       ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
   3943 
   3944     FrameScope scope(masm, StackFrame::INTERNAL);
   3945     Register op = x10;
   3946     Register left = x1;
   3947     Register right = x0;
   3948     // Preserve some caller-saved registers.
   3949     __ Push(x1, x0, lr);
   3950     // Push the arguments.
   3951     __ Mov(op, Smi::FromInt(op_));
   3952     __ Push(left, right, op);
   3953 
   3954     // Call the miss handler. This also pops the arguments.
   3955     __ CallExternalReference(miss, 3);
   3956 
   3957     // Compute the entry point of the rewritten stub.
   3958     __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
   3959     // Restore caller-saved registers.
   3960     __ Pop(lr, x0, x1);
   3961   }
   3962 
   3963   // Tail-call to the new stub.
   3964   __ Jump(stub_entry);
   3965 }
   3966 
   3967 
   3968 void StringHelper::GenerateHashInit(MacroAssembler* masm,
   3969                                     Register hash,
   3970                                     Register character) {
   3971   ASSERT(!AreAliased(hash, character));
   3972 
   3973   // hash = character + (character << 10);
   3974   __ LoadRoot(hash, Heap::kHashSeedRootIndex);
   3975   // Untag smi seed and add the character.
   3976   __ Add(hash, character, Operand(hash, LSR, kSmiShift));
   3977 
   3978   // Compute hashes modulo 2^32 using a 32-bit W register.
   3979   Register hash_w = hash.W();
   3980 
   3981   // hash += hash << 10;
   3982   __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
   3983   // hash ^= hash >> 6;
   3984   __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
   3985 }
   3986 
   3987 
   3988 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
   3989                                             Register hash,
   3990                                             Register character) {
   3991   ASSERT(!AreAliased(hash, character));
   3992 
   3993   // hash += character;
   3994   __ Add(hash, hash, character);
   3995 
   3996   // Compute hashes modulo 2^32 using a 32-bit W register.
   3997   Register hash_w = hash.W();
   3998 
   3999   // hash += hash << 10;
   4000   __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
   4001   // hash ^= hash >> 6;
   4002   __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
   4003 }
   4004 
   4005 
   4006 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
   4007                                        Register hash,
   4008                                        Register scratch) {
   4009   // Compute hashes modulo 2^32 using a 32-bit W register.
   4010   Register hash_w = hash.W();
   4011   Register scratch_w = scratch.W();
   4012   ASSERT(!AreAliased(hash_w, scratch_w));
   4013 
   4014   // hash += hash << 3;
   4015   __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
   4016   // hash ^= hash >> 11;
   4017   __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
   4018   // hash += hash << 15;
   4019   __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
   4020 
   4021   __ Ands(hash_w, hash_w, String::kHashBitMask);
   4022 
   4023   // if (hash == 0) hash = 27;
   4024   __ Mov(scratch_w, StringHasher::kZeroHash);
   4025   __ Csel(hash_w, scratch_w, hash_w, eq);
   4026 }
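
// A self-contained C++ sketch (illustration only) of the hash the three
// helpers above compute when chained: GenerateHashInit seeds with the
// untagged hash seed plus the first character, GenerateHashAddCharacter
// mixes in each further character, and GenerateHashGetHash finalizes.
// kHashBitMaskSketch and kZeroHashSketch are stand-ins for
// String::kHashBitMask and StringHasher::kZeroHash (27, per the comment
// above); treat both values as assumptions.
static uint32_t StringHashSketch(const uint16_t* chars, int length,
                                 uint32_t untagged_seed) {
  uint32_t hash = untagged_seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];  // First iteration: seed + character, as in HashInit.
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  const uint32_t kHashBitMaskSketch = (1u << 30) - 1;  // Assumption.
  hash &= kHashBitMaskSketch;
  const uint32_t kZeroHashSketch = 27;  // "if (hash == 0) hash = 27" above.
  return (hash == 0) ? kZeroHashSketch : hash;
}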
   4027 
   4028 
   4029 void SubStringStub::Generate(MacroAssembler* masm) {
   4030   ASM_LOCATION("SubStringStub::Generate");
   4031   Label runtime;
   4032 
   4033   // Stack frame on entry.
   4034   //  lr: return address
   4035   //  jssp[0]:  substring "to" offset
   4036   //  jssp[8]:  substring "from" offset
   4037   //  jssp[16]: pointer to string object
   4038 
    4039   // This stub is called from the native-call %_SubString(...), so
    4040   // nothing can be assumed about the arguments. We check that:
    4041   //  "string" is a sequential string,
    4042   //  both "from" and "to" are smis, and
    4043   //  0 <= from <= to <= string.length (in debug mode).
   4044   // If any of these assumptions fail, we call the runtime system.
   4045 
   4046   static const int kToOffset = 0 * kPointerSize;
   4047   static const int kFromOffset = 1 * kPointerSize;
   4048   static const int kStringOffset = 2 * kPointerSize;
   4049 
   4050   Register to = x0;
   4051   Register from = x15;
   4052   Register input_string = x10;
   4053   Register input_length = x11;
   4054   Register input_type = x12;
   4055   Register result_string = x0;
   4056   Register result_length = x1;
   4057   Register temp = x3;
   4058 
   4059   __ Peek(to, kToOffset);
   4060   __ Peek(from, kFromOffset);
   4061 
   4062   // Check that both from and to are smis. If not, jump to runtime.
   4063   __ JumpIfEitherNotSmi(from, to, &runtime);
   4064   __ SmiUntag(from);
   4065   __ SmiUntag(to);
   4066 
   4067   // Calculate difference between from and to. If to < from, branch to runtime.
   4068   __ Subs(result_length, to, from);
   4069   __ B(mi, &runtime);
   4070 
    4071   // Check that 'from' is not negative.
   4072   __ Tbnz(from, kWSignBit, &runtime);
   4073 
   4074   // Make sure first argument is a string.
   4075   __ Peek(input_string, kStringOffset);
   4076   __ JumpIfSmi(input_string, &runtime);
   4077   __ IsObjectJSStringType(input_string, input_type, &runtime);
   4078 
   4079   Label single_char;
   4080   __ Cmp(result_length, 1);
   4081   __ B(eq, &single_char);
   4082 
    4083   // Short-cut for the case of a trivial substring.
   4084   Label return_x0;
   4085   __ Ldrsw(input_length,
   4086            UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
   4087 
   4088   __ Cmp(result_length, input_length);
   4089   __ CmovX(x0, input_string, eq);
   4090   // Return original string.
   4091   __ B(eq, &return_x0);
   4092 
   4093   // Longer than original string's length or negative: unsafe arguments.
   4094   __ B(hi, &runtime);
   4095 
   4096   // Shorter than original string's length: an actual substring.
   4097 
   4098   //   x0   to               substring end character offset
   4099   //   x1   result_length    length of substring result
   4100   //   x10  input_string     pointer to input string object
   4101   //   x10  unpacked_string  pointer to unpacked string object
   4102   //   x11  input_length     length of input string
   4103   //   x12  input_type       instance type of input string
   4104   //   x15  from             substring start character offset
   4105 
   4106   // Deal with different string types: update the index if necessary and put
   4107   // the underlying string into register unpacked_string.
   4108   Label underlying_unpacked, sliced_string, seq_or_external_string;
   4109   Label update_instance_type;
   4110   // If the string is not indirect, it can only be sequential or external.
   4111   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
   4112   STATIC_ASSERT(kIsIndirectStringMask != 0);
   4113 
   4114   // Test for string types, and branch/fall through to appropriate unpacking
   4115   // code.
   4116   __ Tst(input_type, kIsIndirectStringMask);
   4117   __ B(eq, &seq_or_external_string);
   4118   __ Tst(input_type, kSlicedNotConsMask);
   4119   __ B(ne, &sliced_string);
   4120 
   4121   Register unpacked_string = input_string;
   4122 
   4123   // Cons string. Check whether it is flat, then fetch first part.
   4124   __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
   4125   __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
   4126   __ Ldr(unpacked_string,
   4127          FieldMemOperand(input_string, ConsString::kFirstOffset));
   4128   __ B(&update_instance_type);
   4129 
   4130   __ Bind(&sliced_string);
   4131   // Sliced string. Fetch parent and correct start index by offset.
   4132   __ Ldrsw(temp,
   4133            UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
   4134   __ Add(from, from, temp);
   4135   __ Ldr(unpacked_string,
   4136          FieldMemOperand(input_string, SlicedString::kParentOffset));
   4137 
   4138   __ Bind(&update_instance_type);
   4139   __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
   4140   __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    4141   // Control must now reach &underlying_unpacked. Since no code is generated
    4142   // between here and there, we fall through instead of emitting a useless branch.
   4143 
   4144   __ Bind(&seq_or_external_string);
   4145   // Sequential or external string. Registers unpacked_string and input_string
   4146   // alias, so there's nothing to do here.
   4147   // Note that if code is added here, the above code must be updated.
   4148 
   4149   //   x0   result_string    pointer to result string object (uninit)
   4150   //   x1   result_length    length of substring result
   4151   //   x10  unpacked_string  pointer to unpacked string object
   4152   //   x11  input_length     length of input string
   4153   //   x12  input_type       instance type of input string
   4154   //   x15  from             substring start character offset
   4155   __ Bind(&underlying_unpacked);
   4156 
   4157   if (FLAG_string_slices) {
   4158     Label copy_routine;
   4159     __ Cmp(result_length, SlicedString::kMinLength);
   4160     // Short slice. Copy instead of slicing.
   4161     __ B(lt, &copy_routine);
    4162     // Allocate a new sliced string. At this point we do not reload the instance
    4163     // type, including the string encoding, because we simply rely on the info
    4164     // provided by the original string. It does not matter if the original
    4165     // string's encoding is wrong, because we always have to recheck the encoding
    4166     // of the newly created string's parent anyway, due to externalized strings.
   4167     Label two_byte_slice, set_slice_header;
   4168     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   4169     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   4170     __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
   4171     __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
   4172                                  &runtime);
   4173     __ B(&set_slice_header);
   4174 
   4175     __ Bind(&two_byte_slice);
   4176     __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
   4177                                    &runtime);
   4178 
   4179     __ Bind(&set_slice_header);
   4180     __ SmiTag(from);
   4181     __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
   4182     __ Str(unpacked_string,
   4183            FieldMemOperand(result_string, SlicedString::kParentOffset));
   4184     __ B(&return_x0);
   4185 
   4186     __ Bind(&copy_routine);
   4187   }
   4188 
   4189   //   x0   result_string    pointer to result string object (uninit)
   4190   //   x1   result_length    length of substring result
   4191   //   x10  unpacked_string  pointer to unpacked string object
   4192   //   x11  input_length     length of input string
   4193   //   x12  input_type       instance type of input string
   4194   //   x13  unpacked_char0   pointer to first char of unpacked string (uninit)
   4195   //   x13  substring_char0  pointer to first char of substring (uninit)
   4196   //   x14  result_char0     pointer to first char of result (uninit)
   4197   //   x15  from             substring start character offset
   4198   Register unpacked_char0 = x13;
   4199   Register substring_char0 = x13;
   4200   Register result_char0 = x14;
   4201   Label two_byte_sequential, sequential_string, allocate_result;
   4202   STATIC_ASSERT(kExternalStringTag != 0);
   4203   STATIC_ASSERT(kSeqStringTag == 0);
   4204 
   4205   __ Tst(input_type, kExternalStringTag);
   4206   __ B(eq, &sequential_string);
   4207 
   4208   __ Tst(input_type, kShortExternalStringTag);
   4209   __ B(ne, &runtime);
   4210   __ Ldr(unpacked_char0,
   4211          FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
   4212   // unpacked_char0 points to the first character of the underlying string.
   4213   __ B(&allocate_result);
   4214 
   4215   __ Bind(&sequential_string);
   4216   // Locate first character of underlying subject string.
   4217   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   4218   __ Add(unpacked_char0, unpacked_string,
   4219          SeqOneByteString::kHeaderSize - kHeapObjectTag);
   4220 
   4221   __ Bind(&allocate_result);
   4222   // Sequential ASCII string. Allocate the result.
   4223   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   4224   __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
   4225 
   4226   // Allocate and copy the resulting ASCII string.
   4227   __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
   4228 
   4229   // Locate first character of substring to copy.
   4230   __ Add(substring_char0, unpacked_char0, from);
   4231 
   4232   // Locate first character of result.
   4233   __ Add(result_char0, result_string,
   4234          SeqOneByteString::kHeaderSize - kHeapObjectTag);
   4235 
   4236   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   4237   __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
   4238   __ B(&return_x0);
   4239 
   4240   // Allocate and copy the resulting two-byte string.
   4241   __ Bind(&two_byte_sequential);
   4242   __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
   4243 
   4244   // Locate first character of substring to copy.
   4245   __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
   4246 
   4247   // Locate first character of result.
   4248   __ Add(result_char0, result_string,
   4249          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   4250 
   4251   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   4252   __ Add(result_length, result_length, result_length);
   4253   __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
   4254 
   4255   __ Bind(&return_x0);
   4256   Counters* counters = isolate()->counters();
   4257   __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
   4258   __ Drop(3);
   4259   __ Ret();
   4260 
   4261   __ Bind(&runtime);
   4262   __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
   4263 
   4264   __ bind(&single_char);
   4265   // x1: result_length
   4266   // x10: input_string
   4267   // x12: input_type
   4268   // x15: from (untagged)
   4269   __ SmiTag(from);
   4270   StringCharAtGenerator generator(
   4271       input_string, from, result_length, x0,
   4272       &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
   4273   generator.GenerateFast(masm);
   4274   __ Drop(3);
   4275   __ Ret();
   4276   generator.SkipSlow(masm, &runtime);
   4277 }
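
// A host-side sketch (illustration only) of the dispatch performed above.
// kMinSliceLengthSketch stands in for SlicedString::kMinLength and its value
// is an assumption; the real stub also bails out to the runtime for non-smi
// arguments, negative offsets and non-string receivers.
enum SubStringPathSketch {
  kRuntimeCall,     // unsafe arguments
  kSingleChar,      // length-1 result: char-at fast path
  kReturnOriginal,  // result covers the whole string
  kMakeSlice,       // share the parent's characters
  kCopyChars        // allocate and copy
};
static SubStringPathSketch ChooseSubStringPathSketch(int from, int to,
                                                     int string_length) {
  if (from < 0 || to < from) return kRuntimeCall;
  int result_length = to - from;
  if (result_length == 1) return kSingleChar;
  if (result_length == string_length) return kReturnOriginal;
  if (result_length > string_length) return kRuntimeCall;
  const int kMinSliceLengthSketch = 13;  // SlicedString::kMinLength (assumed).
  return (result_length >= kMinSliceLengthSketch) ? kMakeSlice : kCopyChars;
}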
   4278 
   4279 
   4280 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
   4281                                                       Register left,
   4282                                                       Register right,
   4283                                                       Register scratch1,
   4284                                                       Register scratch2,
   4285                                                       Register scratch3) {
   4286   ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
   4287   Register result = x0;
   4288   Register left_length = scratch1;
   4289   Register right_length = scratch2;
   4290 
   4291   // Compare lengths. If lengths differ, strings can't be equal. Lengths are
   4292   // smis, and don't need to be untagged.
   4293   Label strings_not_equal, check_zero_length;
   4294   __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
   4295   __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
   4296   __ Cmp(left_length, right_length);
   4297   __ B(eq, &check_zero_length);
   4298 
   4299   __ Bind(&strings_not_equal);
   4300   __ Mov(result, Smi::FromInt(NOT_EQUAL));
   4301   __ Ret();
   4302 
    4303   // Check if the length is zero. If so, the strings must be equal (and empty).
   4304   Label compare_chars;
   4305   __ Bind(&check_zero_length);
   4306   STATIC_ASSERT(kSmiTag == 0);
   4307   __ Cbnz(left_length, &compare_chars);
   4308   __ Mov(result, Smi::FromInt(EQUAL));
   4309   __ Ret();
   4310 
   4311   // Compare characters. Falls through if all characters are equal.
   4312   __ Bind(&compare_chars);
   4313   GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
   4314                                 scratch3, &strings_not_equal);
   4315 
   4316   // Characters in strings are equal.
   4317   __ Mov(result, Smi::FromInt(EQUAL));
   4318   __ Ret();
   4319 }
   4320 
   4321 
   4322 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
   4323                                                         Register left,
   4324                                                         Register right,
   4325                                                         Register scratch1,
   4326                                                         Register scratch2,
   4327                                                         Register scratch3,
   4328                                                         Register scratch4) {
   4329   ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
   4330   Label result_not_equal, compare_lengths;
   4331 
   4332   // Find minimum length and length difference.
   4333   Register length_delta = scratch3;
   4334   __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
   4335   __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
   4336   __ Subs(length_delta, scratch1, scratch2);
   4337 
   4338   Register min_length = scratch1;
   4339   __ Csel(min_length, scratch2, scratch1, gt);
   4340   __ Cbz(min_length, &compare_lengths);
   4341 
   4342   // Compare loop.
   4343   GenerateAsciiCharsCompareLoop(masm,
   4344                                 left, right, min_length, scratch2, scratch4,
   4345                                 &result_not_equal);
   4346 
   4347   // Compare lengths - strings up to min-length are equal.
   4348   __ Bind(&compare_lengths);
   4349 
   4350   ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   4351 
   4352   // Use length_delta as result if it's zero.
   4353   Register result = x0;
   4354   __ Subs(result, length_delta, 0);
   4355 
   4356   __ Bind(&result_not_equal);
   4357   Register greater = x10;
   4358   Register less = x11;
   4359   __ Mov(greater, Smi::FromInt(GREATER));
   4360   __ Mov(less, Smi::FromInt(LESS));
   4361   __ CmovX(result, greater, gt);
   4362   __ CmovX(result, less, lt);
   4363   __ Ret();
   4364 }
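
// A host-side sketch (illustration only) of the three-way comparison above:
// compare up to the shorter length, then fall back to the length difference,
// using the usual LESS (-1) / EQUAL (0) / GREATER (1) convention.
static int CompareFlatStringsSketch(const uint8_t* left, int left_length,
                                    const uint8_t* right, int right_length) {
  int min_length = (left_length < right_length) ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return (left[i] < right[i]) ? -1 : 1;
  }
  int delta = left_length - right_length;
  if (delta == 0) return 0;
  return (delta < 0) ? -1 : 1;
}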
   4365 
   4366 
   4367 void StringCompareStub::GenerateAsciiCharsCompareLoop(
   4368     MacroAssembler* masm,
   4369     Register left,
   4370     Register right,
   4371     Register length,
   4372     Register scratch1,
   4373     Register scratch2,
   4374     Label* chars_not_equal) {
   4375   ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
   4376 
    4377   // Change index to run from -length to -1 by adding length to the string
    4378   // start. This means the loop ends when index reaches zero, so no
    4379   // additional compare is needed.
   4380   __ SmiUntag(length);
   4381   __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
   4382   __ Add(left, left, scratch1);
   4383   __ Add(right, right, scratch1);
   4384 
   4385   Register index = length;
   4386   __ Neg(index, length);  // index = -length;
   4387 
   4388   // Compare loop
   4389   Label loop;
   4390   __ Bind(&loop);
   4391   __ Ldrb(scratch1, MemOperand(left, index));
   4392   __ Ldrb(scratch2, MemOperand(right, index));
   4393   __ Cmp(scratch1, scratch2);
   4394   __ B(ne, chars_not_equal);
   4395   __ Add(index, index, 1);
   4396   __ Cbnz(index, &loop);
   4397 }
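
// An equivalent C++ loop (illustration only) for the code above: both string
// pointers are advanced past their characters, and the index runs from
// -length up to zero, so the increment-and-Cbnz pair doubles as the loop
// termination test with no separate compare.
static bool AsciiCharsEqualSketch(const uint8_t* left, const uint8_t* right,
                                  int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;  // chars_not_equal
  }
  return true;
}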
   4398 
   4399 
   4400 void StringCompareStub::Generate(MacroAssembler* masm) {
   4401   Label runtime;
   4402 
   4403   Counters* counters = isolate()->counters();
   4404 
   4405   // Stack frame on entry.
   4406   //  sp[0]: right string
   4407   //  sp[8]: left string
   4408   Register right = x10;
   4409   Register left = x11;
   4410   Register result = x0;
   4411   __ Pop(right, left);
   4412 
   4413   Label not_same;
   4414   __ Subs(result, right, left);
   4415   __ B(ne, &not_same);
   4416   STATIC_ASSERT(EQUAL == 0);
   4417   __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
   4418   __ Ret();
   4419 
   4420   __ Bind(&not_same);
   4421 
   4422   // Check that both objects are sequential ASCII strings.
   4423   __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
   4424 
    4425   // Compare flat ASCII strings natively. The arguments have already been
    4426   // popped off the stack, and this call generates its own return.
   4427   __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
   4428   GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
   4429 
   4430   __ Bind(&runtime);
   4431 
    4432   // Push the arguments back onto the stack.
   4433   //  sp[0] = right string
   4434   //  sp[8] = left string.
   4435   __ Push(left, right);
   4436 
   4437   // Call the runtime.
   4438   // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
   4439   __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
   4440 }
   4441 
   4442 
   4443 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   4444   // ----------- S t a t e -------------
   4445   //  -- x1    : left
   4446   //  -- x0    : right
   4447   //  -- lr    : return address
   4448   // -----------------------------------
   4449 
   4450   // Load x2 with the allocation site.  We stick an undefined dummy value here
   4451   // and replace it with the real allocation site later when we instantiate this
   4452   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   4453   __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
   4454 
   4455   // Make sure that we actually patched the allocation site.
   4456   if (FLAG_debug_code) {
   4457     __ AssertNotSmi(x2, kExpectedAllocationSite);
   4458     __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
   4459     __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
   4460                             kExpectedAllocationSite);
   4461   }
   4462 
   4463   // Tail call into the stub that handles binary operations with allocation
   4464   // sites.
   4465   BinaryOpWithAllocationSiteStub stub(isolate(), state_);
   4466   __ TailCallStub(&stub);
   4467 }
   4468 
   4469 
   4470 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
    4471   // We need some extra registers for this stub. They have been allocated,
    4472   // but we must save their original contents before using them.
   4473   regs_.Save(masm);
   4474 
   4475   if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
   4476     Label dont_need_remembered_set;
   4477 
   4478     Register value = regs_.scratch0();
   4479     __ Ldr(value, MemOperand(regs_.address()));
   4480     __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
   4481 
   4482     __ CheckPageFlagSet(regs_.object(),
   4483                         value,
   4484                         1 << MemoryChunk::SCAN_ON_SCAVENGE,
   4485                         &dont_need_remembered_set);
   4486 
   4487     // First notify the incremental marker if necessary, then update the
   4488     // remembered set.
   4489     CheckNeedsToInformIncrementalMarker(
   4490         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
   4491     InformIncrementalMarker(masm);
   4492     regs_.Restore(masm);  // Restore the extra scratch registers we used.
   4493 
   4494     __ RememberedSetHelper(object_,
   4495                            address_,
   4496                            value_,            // scratch1
   4497                            save_fp_regs_mode_,
   4498                            MacroAssembler::kReturnAtEnd);
   4499 
   4500     __ Bind(&dont_need_remembered_set);
   4501   }
   4502 
   4503   CheckNeedsToInformIncrementalMarker(
   4504       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
   4505   InformIncrementalMarker(masm);
   4506   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   4507   __ Ret();
   4508 }
   4509 
   4510 
   4511 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
   4512   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
   4513   Register address =
   4514     x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
   4515   ASSERT(!address.Is(regs_.object()));
   4516   ASSERT(!address.Is(x0));
   4517   __ Mov(address, regs_.address());
   4518   __ Mov(x0, regs_.object());
   4519   __ Mov(x1, address);
   4520   __ Mov(x2, ExternalReference::isolate_address(isolate()));
   4521 
   4522   AllowExternalCallThatCantCauseGC scope(masm);
   4523   ExternalReference function =
   4524       ExternalReference::incremental_marking_record_write_function(
   4525           isolate());
   4526   __ CallCFunction(function, 3, 0);
   4527 
   4528   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
   4529 }
   4530 
   4531 
   4532 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   4533     MacroAssembler* masm,
   4534     OnNoNeedToInformIncrementalMarker on_no_need,
   4535     Mode mode) {
   4536   Label on_black;
   4537   Label need_incremental;
   4538   Label need_incremental_pop_scratch;
   4539 
   4540   Register mem_chunk = regs_.scratch0();
   4541   Register counter = regs_.scratch1();
   4542   __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
   4543   __ Ldr(counter,
   4544          MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
   4545   __ Subs(counter, counter, 1);
   4546   __ Str(counter,
   4547          MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
   4548   __ B(mi, &need_incremental);
   4549 
   4550   // If the object is not black we don't have to inform the incremental marker.
   4551   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
   4552 
   4553   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   4554   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   4555     __ RememberedSetHelper(object_,
   4556                            address_,
   4557                            value_,            // scratch1
   4558                            save_fp_regs_mode_,
   4559                            MacroAssembler::kReturnAtEnd);
   4560   } else {
   4561     __ Ret();
   4562   }
   4563 
   4564   __ Bind(&on_black);
   4565   // Get the value from the slot.
   4566   Register value = regs_.scratch0();
   4567   __ Ldr(value, MemOperand(regs_.address()));
   4568 
   4569   if (mode == INCREMENTAL_COMPACTION) {
   4570     Label ensure_not_white;
   4571 
   4572     __ CheckPageFlagClear(value,
   4573                           regs_.scratch1(),
   4574                           MemoryChunk::kEvacuationCandidateMask,
   4575                           &ensure_not_white);
   4576 
   4577     __ CheckPageFlagClear(regs_.object(),
   4578                           regs_.scratch1(),
   4579                           MemoryChunk::kSkipEvacuationSlotsRecordingMask,
   4580                           &need_incremental);
   4581 
   4582     __ Bind(&ensure_not_white);
   4583   }
   4584 
   4585   // We need extra registers for this, so we push the object and the address
   4586   // register temporarily.
   4587   __ Push(regs_.address(), regs_.object());
   4588   __ EnsureNotWhite(value,
   4589                     regs_.scratch1(),  // Scratch.
   4590                     regs_.object(),    // Scratch.
   4591                     regs_.address(),   // Scratch.
   4592                     regs_.scratch2(),  // Scratch.
   4593                     &need_incremental_pop_scratch);
   4594   __ Pop(regs_.object(), regs_.address());
   4595 
   4596   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   4597   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   4598     __ RememberedSetHelper(object_,
   4599                            address_,
   4600                            value_,            // scratch1
   4601                            save_fp_regs_mode_,
   4602                            MacroAssembler::kReturnAtEnd);
   4603   } else {
   4604     __ Ret();
   4605   }
   4606 
   4607   __ Bind(&need_incremental_pop_scratch);
   4608   __ Pop(regs_.object(), regs_.address());
   4609 
   4610   __ Bind(&need_incremental);
   4611   // Fall through when we need to inform the incremental marker.
   4612 }
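
// A boolean sketch (illustration only, parameter names are hypothetical) of
// the decision implemented above: the page's write barrier counter is
// decremented first, and once it underflows the marker must be informed. A
// non-black object needs nothing. For a black object during compaction, a
// value on an evacuation candidate page forces a call unless the object's
// page skips slot recording; finally, a white value still needs marking.
static bool NeedsToInformMarkerSketch(int write_barrier_counter,
                                      bool object_is_black, bool compacting,
                                      bool value_on_evacuation_candidate,
                                      bool object_skips_slot_recording,
                                      bool value_is_white) {
  if (write_barrier_counter - 1 < 0) return true;
  if (!object_is_black) return false;
  if (compacting && value_on_evacuation_candidate &&
      !object_skips_slot_recording) {
    return true;
  }
  return value_is_white;
}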
   4613 
   4614 
   4615 void RecordWriteStub::Generate(MacroAssembler* masm) {
   4616   Label skip_to_incremental_noncompacting;
   4617   Label skip_to_incremental_compacting;
   4618 
    4619   // We patch the first two instructions back and forth between a nop and a
    4620   // real branch when we start and stop incremental heap marking.
    4621   // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so two
    4622   // nops are generated.
   4623   // See RecordWriteStub::Patch for details.
   4624   {
   4625     InstructionAccurateScope scope(masm, 2);
   4626     __ adr(xzr, &skip_to_incremental_noncompacting);
   4627     __ adr(xzr, &skip_to_incremental_compacting);
   4628   }
   4629 
   4630   if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
   4631     __ RememberedSetHelper(object_,
   4632                            address_,
   4633                            value_,            // scratch1
   4634                            save_fp_regs_mode_,
   4635                            MacroAssembler::kReturnAtEnd);
   4636   }
   4637   __ Ret();
   4638 
   4639   __ Bind(&skip_to_incremental_noncompacting);
   4640   GenerateIncremental(masm, INCREMENTAL);
   4641 
   4642   __ Bind(&skip_to_incremental_compacting);
   4643   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
   4644 }
   4645 
   4646 
   4647 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   4648   // x0     value            element value to store
   4649   // x3     index_smi        element index as smi
   4650   // sp[0]  array_index_smi  array literal index in function as smi
   4651   // sp[1]  array            array literal
   4652 
   4653   Register value = x0;
   4654   Register index_smi = x3;
   4655 
   4656   Register array = x1;
   4657   Register array_map = x2;
   4658   Register array_index_smi = x4;
   4659   __ PeekPair(array_index_smi, array, 0);
   4660   __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
   4661 
   4662   Label double_elements, smi_element, fast_elements, slow_elements;
   4663   Register bitfield2 = x10;
   4664   __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
   4665 
   4666   // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
   4667   // FAST_HOLEY_ELEMENTS.
   4668   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   4669   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   4670   STATIC_ASSERT(FAST_ELEMENTS == 2);
   4671   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   4672   __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
   4673   __ B(hi, &double_elements);
   4674 
   4675   __ JumpIfSmi(value, &smi_element);
   4676 
   4677   // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
   4678   __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
   4679           &fast_elements);
   4680 
   4681   // Store into the array literal requires an elements transition. Call into
   4682   // the runtime.
   4683   __ Bind(&slow_elements);
   4684   __ Push(array, index_smi, value);
   4685   __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   4686   __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
   4687   __ Push(x11, array_index_smi);
   4688   __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
   4689 
   4690   // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
   4691   __ Bind(&fast_elements);
   4692   __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
   4693   __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
   4694   __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
   4695   __ Str(value, MemOperand(x11));
   4696   // Update the write barrier for the array store.
   4697   __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
   4698                  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   4699   __ Ret();
   4700 
   4701   // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
   4702   // and value is Smi.
   4703   __ Bind(&smi_element);
   4704   __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
   4705   __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
   4706   __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
   4707   __ Ret();
   4708 
   4709   __ Bind(&double_elements);
   4710   __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
   4711   __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
   4712                                  &slow_elements);
   4713   __ Ret();
   4714 }
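
// A sketch (illustration only) of the dispatch above, relying on the
// elements-kind ordering pinned down by the STATIC_ASSERTs: kinds 0-3 are
// the fast smi/object kinds, anything beyond the fast-holey range holds
// unboxed doubles, and a non-smi value stored into a smi-only array needs an
// elements transition via the runtime.
enum StoreElementPathSketch {
  kDoubleElementsStore,
  kSmiElementStore,
  kFastElementStore,
  kSlowRuntimeStore
};
static StoreElementPathSketch ChooseStorePathSketch(int elements_kind,
                                                    bool value_is_smi) {
  if (elements_kind > 3 /* FAST_HOLEY_ELEMENTS */) return kDoubleElementsStore;
  if (value_is_smi) return kSmiElementStore;
  if (elements_kind >= 2 /* FAST_ELEMENTS */) return kFastElementStore;
  return kSlowRuntimeStore;  // Requires an elements transition.
}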
   4715 
   4716 
   4717 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   4718   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   4719   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   4720   int parameter_count_offset =
   4721       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   4722   __ Ldr(x1, MemOperand(fp, parameter_count_offset));
   4723   if (function_mode_ == JS_FUNCTION_STUB_MODE) {
   4724     __ Add(x1, x1, 1);
   4725   }
   4726   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   4727   __ Drop(x1);
   4728   // Return to IC Miss stub, continuation still on stack.
   4729   __ Ret();
   4730 }
   4731 
   4732 
   4733 static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
   4734   // The entry hook is a "BumpSystemStackPointer" instruction (sub),
   4735   // followed by a "Push lr" instruction, followed by a call.
   4736   unsigned int size =
   4737       Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
   4738   if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
   4739     // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
   4740     // "BumpSystemStackPointer".
   4741     size += kInstructionSize;
   4742   }
   4743   return size;
   4744 }
   4745 
   4746 
   4747 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   4748   if (masm->isolate()->function_entry_hook() != NULL) {
   4749     ProfileEntryHookStub stub(masm->isolate());
   4750     Assembler::BlockConstPoolScope no_const_pools(masm);
   4751     DontEmitDebugCodeScope no_debug_code(masm);
   4752     Label entry_hook_call_start;
   4753     __ Bind(&entry_hook_call_start);
   4754     __ Push(lr);
   4755     __ CallStub(&stub);
   4756     ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
   4757            GetProfileEntryHookCallSize(masm));
   4758 
   4759     __ Pop(lr);
   4760   }
   4761 }
   4762 
   4763 
   4764 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   4765   MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
   4766 
   4767   // Save all kCallerSaved registers (including lr), since this can be called
   4768   // from anywhere.
   4769   // TODO(jbramley): What about FP registers?
   4770   __ PushCPURegList(kCallerSaved);
   4771   ASSERT(kCallerSaved.IncludesAliasOf(lr));
   4772   const int kNumSavedRegs = kCallerSaved.Count();
   4773 
   4774   // Compute the function's address as the first argument.
   4775   __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
   4776 
   4777 #if V8_HOST_ARCH_ARM64
   4778   uintptr_t entry_hook =
   4779       reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
   4780   __ Mov(x10, entry_hook);
   4781 #else
   4782   // Under the simulator we need to indirect the entry hook through a trampoline
   4783   // function at a known address.
   4784   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   4785   __ Mov(x10, Operand(ExternalReference(&dispatcher,
   4786                                         ExternalReference::BUILTIN_CALL,
   4787                                         isolate())));
    4788   // The trampoline additionally takes the isolate as a third parameter.
   4789   __ Mov(x2, ExternalReference::isolate_address(isolate()));
   4790 #endif
   4791 
   4792   // The caller's return address is above the saved temporaries.
   4793   // Grab its location for the second argument to the hook.
   4794   __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
   4795 
   4796   {
   4797     // Create a dummy frame, as CallCFunction requires this.
   4798     FrameScope frame(masm, StackFrame::MANUAL);
   4799     __ CallCFunction(x10, 2, 0);
   4800   }
   4801 
   4802   __ PopCPURegList(kCallerSaved);
   4803   __ Ret();
   4804 }
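
// A sketch (illustration only) of the shape of the host function invoked
// above, following the FunctionEntryHook callback of the public V8 API: the
// first argument is the address of the instrumented function, the second is
// the location of the caller's return address; under the simulator, the
// trampoline additionally receives the isolate.
typedef void (*EntryHookSketch)(uintptr_t function,
                                uintptr_t return_addr_location);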
   4805 
   4806 
   4807 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   4808   // When calling into C++ code the stack pointer must be csp.
   4809   // Therefore this code must use csp for peek/poke operations when the
    4810   // stub is generated. When the stub is called
    4811   // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
    4812   // and configure the stack pointer *before* doing the call.
   4813   const Register old_stack_pointer = __ StackPointer();
   4814   __ SetStackPointer(csp);
   4815 
   4816   // Put return address on the stack (accessible to GC through exit frame pc).
   4817   __ Poke(lr, 0);
   4818   // Call the C++ function.
   4819   __ Blr(x10);
   4820   // Return to calling code.
   4821   __ Peek(lr, 0);
   4822   __ AssertFPCRState();
   4823   __ Ret();
   4824 
   4825   __ SetStackPointer(old_stack_pointer);
   4826 }
    4827 

   4828 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   4829                                     Register target) {
   4830   // Make sure the caller configured the stack pointer (see comment in
   4831   // DirectCEntryStub::Generate).
   4832   ASSERT(csp.Is(__ StackPointer()));
   4833 
   4834   intptr_t code =
   4835       reinterpret_cast<intptr_t>(GetCode().location());
   4836   __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
   4837   __ Mov(x10, target);
   4838   // Branch to the stub.
   4839   __ Blr(lr);
   4840 }
   4841 
   4842 
   4843 // Probe the name dictionary in the 'elements' register.
   4844 // Jump to the 'done' label if a property with the given name is found.
   4845 // Jump to the 'miss' label otherwise.
   4846 //
    4847 // If the lookup was successful, 'scratch2' will be elements +
    4848 // 3 * kPointerSize * index. 'elements' and 'name' are preserved on miss.
   4849 void NameDictionaryLookupStub::GeneratePositiveLookup(
   4850     MacroAssembler* masm,
   4851     Label* miss,
   4852     Label* done,
   4853     Register elements,
   4854     Register name,
   4855     Register scratch1,
   4856     Register scratch2) {
   4857   ASSERT(!AreAliased(elements, name, scratch1, scratch2));
   4858 
   4859   // Assert that name contains a string.
   4860   __ AssertName(name);
   4861 
   4862   // Compute the capacity mask.
   4863   __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
   4864   __ Sub(scratch1, scratch1, 1);
   4865 
   4866   // Generate an unrolled loop that performs a few probes before giving up.
   4867   for (int i = 0; i < kInlinedProbes; i++) {
   4868     // Compute the masked index: (hash + i + i * i) & mask.
   4869     __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
   4870     if (i > 0) {
    4871       // Add the probe offset (i + i * i) left shifted to avoid right shifting
    4872       // the hash in a separate instruction. The value hash + i + i * i is
    4873       // right shifted by the following And instruction.
   4874       ASSERT(NameDictionary::GetProbeOffset(i) <
   4875           1 << (32 - Name::kHashFieldOffset));
   4876       __ Add(scratch2, scratch2, Operand(
   4877           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   4878     }
   4879     __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
   4880 
   4881     // Scale the index by multiplying by the element size.
   4882     ASSERT(NameDictionary::kEntrySize == 3);
   4883     __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
   4884 
   4885     // Check if the key is identical to the name.
   4886     UseScratchRegisterScope temps(masm);
   4887     Register scratch3 = temps.AcquireX();
   4888     __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
   4889     __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
   4890     __ Cmp(name, scratch3);
   4891     __ B(eq, done);
   4892   }
   4893 
   4894   // The inlined probes didn't find the entry.
   4895   // Call the complete stub to scan the whole dictionary.
   4896 
   4897   CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
   4898   spill_list.Combine(lr);
   4899   spill_list.Remove(scratch1);
   4900   spill_list.Remove(scratch2);
   4901 
   4902   __ PushCPURegList(spill_list);
   4903 
   4904   if (name.is(x0)) {
   4905     ASSERT(!elements.is(x1));
   4906     __ Mov(x1, name);
   4907     __ Mov(x0, elements);
   4908   } else {
   4909     __ Mov(x0, elements);
   4910     __ Mov(x1, name);
   4911   }
   4912 
   4913   Label not_found;
   4914   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
   4915   __ CallStub(&stub);
   4916   __ Cbz(x0, &not_found);
   4917   __ Mov(scratch2, x2);  // Move entry index into scratch2.
   4918   __ PopCPURegList(spill_list);
   4919   __ B(done);
   4920 
   4921   __ Bind(&not_found);
   4922   __ PopCPURegList(spill_list);
   4923   __ B(miss);
   4924 }
   4925 
   4926 
   4927 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
   4928                                                       Label* miss,
   4929                                                       Label* done,
   4930                                                       Register receiver,
   4931                                                       Register properties,
   4932                                                       Handle<Name> name,
   4933                                                       Register scratch0) {
   4934   ASSERT(!AreAliased(receiver, properties, scratch0));
   4935   ASSERT(name->IsUniqueName());
    4936   // If the names of the slots probed for this hash value (probes 1 to
    4937   // kProbes - 1) are all different from the given name, and the kProbes-th
    4938   // slot is unused (its name is the undefined value), then the hash table
    4939   // is guaranteed not to contain the property. This holds even if some
    4940   // slots represent deleted properties (their names are the hole value).
   4941   for (int i = 0; i < kInlinedProbes; i++) {
   4942     // scratch0 points to properties hash.
   4943     // Compute the masked index: (hash + i + i * i) & mask.
   4944     Register index = scratch0;
   4945     // Capacity is smi 2^n.
   4946     __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
   4947     __ Sub(index, index, 1);
   4948     __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
   4949 
   4950     // Scale the index by multiplying by the entry size.
   4951     ASSERT(NameDictionary::kEntrySize == 3);
   4952     __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
   4953 
   4954     Register entity_name = scratch0;
   4955     // Having undefined at this place means the name is not contained.
   4956     Register tmp = index;
   4957     __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
   4958     __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   4959 
   4960     __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
   4961 
   4962     // Stop if found the property.
   4963     __ Cmp(entity_name, Operand(name));
   4964     __ B(eq, miss);
   4965 
   4966     Label good;
   4967     __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
   4968 
   4969     // Check if the entry name is not a unique name.
   4970     __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   4971     __ Ldrb(entity_name,
   4972             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   4973     __ JumpIfNotUniqueName(entity_name, miss);
   4974     __ Bind(&good);
   4975   }
   4976 
   4977   CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
   4978   spill_list.Combine(lr);
   4979   spill_list.Remove(scratch0);  // Scratch registers don't need to be preserved.
   4980 
   4981   __ PushCPURegList(spill_list);
   4982 
   4983   __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   4984   __ Mov(x1, Operand(name));
   4985   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   4986   __ CallStub(&stub);
   4987   // Move stub return value to scratch0. Note that scratch0 is not included in
   4988   // spill_list and won't be clobbered by PopCPURegList.
   4989   __ Mov(scratch0, x0);
   4990   __ PopCPURegList(spill_list);
   4991 
   4992   __ Cbz(scratch0, done);
   4993   __ B(miss);
   4994 }
   4995 
   4996 
   4997 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   4998   // This stub overrides SometimesSetsUpAFrame() to return false. That means
   4999   // we cannot call anything that could cause a GC from this stub.
   5000   //
   5001   // Arguments are in x0 and x1:
   5002   //   x0: property dictionary.
   5003   //   x1: the name of the property we are looking for.
   5004   //
    5005   // The return value is in x0: zero if the lookup failed, non-zero otherwise.
    5006   // If the lookup is successful, x2 contains the index of the entry.
   5007 
   5008   Register result = x0;
   5009   Register dictionary = x0;
   5010   Register key = x1;
   5011   Register index = x2;
   5012   Register mask = x3;
   5013   Register hash = x4;
   5014   Register undefined = x5;
   5015   Register entry_key = x6;
   5016 
   5017   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   5018 
   5019   __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
   5020   __ Sub(mask, mask, 1);
   5021 
   5022   __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
   5023   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   5024 
   5025   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   5026     // Compute the masked index: (hash + i + i * i) & mask.
   5027     // Capacity is smi 2^n.
   5028     if (i > 0) {
    5029       // Add the probe offset (i + i * i) left shifted to avoid right shifting
    5030       // the hash in a separate instruction. The value hash + i + i * i is
    5031       // right shifted by the following And instruction.
   5032       ASSERT(NameDictionary::GetProbeOffset(i) <
   5033              1 << (32 - Name::kHashFieldOffset));
   5034       __ Add(index, hash,
   5035              NameDictionary::GetProbeOffset(i) << Name::kHashShift);
   5036     } else {
   5037       __ Mov(index, hash);
   5038     }
   5039     __ And(index, mask, Operand(index, LSR, Name::kHashShift));
   5040 
   5041     // Scale the index by multiplying by the entry size.
   5042     ASSERT(NameDictionary::kEntrySize == 3);
   5043     __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
   5044 
   5045     __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
   5046     __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
   5047 
   5048     // Having undefined at this place means the name is not contained.
   5049     __ Cmp(entry_key, undefined);
   5050     __ B(eq, &not_in_dictionary);
   5051 
   5052     // Stop if found the property.
   5053     __ Cmp(entry_key, key);
   5054     __ B(eq, &in_dictionary);
   5055 
   5056     if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
   5057       // Check if the entry name is not a unique name.
   5058       __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   5059       __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   5060       __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
   5061     }
   5062   }
   5063 
   5064   __ Bind(&maybe_in_dictionary);
   5065   // If we are doing negative lookup then probing failure should be
   5066   // treated as a lookup success. For positive lookup, probing failure
   5067   // should be treated as lookup failure.
   5068   if (mode_ == POSITIVE_LOOKUP) {
   5069     __ Mov(result, 0);
   5070     __ Ret();
   5071   }
   5072 
   5073   __ Bind(&in_dictionary);
   5074   __ Mov(result, 1);
   5075   __ Ret();
   5076 
   5077   __ Bind(&not_in_dictionary);
   5078   __ Mov(result, 0);
   5079   __ Ret();
   5080 }
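
// A host-side sketch (illustration only) of the probe sequence used above,
// following the in-line comments: the table capacity is a power of two, the
// masked index for probe i is (hash + i + i*i) & mask, and each entry spans
// NameDictionary::kEntrySize (3) pointers.
static uint32_t ProbeEntryIndexSketch(uint32_t hash, uint32_t probe,
                                      uint32_t capacity) {
  uint32_t mask = capacity - 1;
  uint32_t index = (hash + probe + probe * probe) & mask;
  return index * 3;  // Word offset of the entry's first element.
}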
   5081 
   5082 
   5083 template<class T>
   5084 static void CreateArrayDispatch(MacroAssembler* masm,
   5085                                 AllocationSiteOverrideMode mode) {
   5086   ASM_LOCATION("CreateArrayDispatch");
   5087   if (mode == DISABLE_ALLOCATION_SITES) {
   5088     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
   5089      __ TailCallStub(&stub);
   5090 
   5091   } else if (mode == DONT_OVERRIDE) {
   5092     Register kind = x3;
   5093     int last_index =
   5094         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   5095     for (int i = 0; i <= last_index; ++i) {
   5096       Label next;
   5097       ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
   5098       // TODO(jbramley): Is this the best way to handle this? Can we make the
   5099       // tail calls conditional, rather than hopping over each one?
   5100       __ CompareAndBranch(kind, candidate_kind, ne, &next);
   5101       T stub(masm->isolate(), candidate_kind);
   5102       __ TailCallStub(&stub);
   5103       __ Bind(&next);
   5104     }
   5105 
   5106     // If we reached this point there is a problem.
   5107     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   5108 
   5109   } else {
   5110     UNREACHABLE();
   5111   }
   5112 }
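
// A sketch (illustration only, helper name is hypothetical) of the dispatch
// loop above: walk the fast elements-kind sequence in order and pick the
// stub for the first matching kind; an unexpected kind aborts.
static int FindElementsKindSequenceIndexSketch(int kind, const int* sequence,
                                               int sequence_length) {
  for (int i = 0; i < sequence_length; i++) {
    if (sequence[i] == kind) return i;  // Tail-call the stub for this kind.
  }
  return -1;  // Unexpected kind: the generated code aborts here.
}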
   5113 
   5114 
   5115 // TODO(jbramley): If this needs to be a special case, make it a proper template
   5116 // specialization, and not a separate function.
   5117 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   5118                                            AllocationSiteOverrideMode mode) {
   5119   ASM_LOCATION("CreateArrayDispatchOneArgument");
   5120   // x0 - argc
   5121   // x1 - constructor
   5122   // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
   5123   // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
   5124   // sp[0] - last argument
   5125 
   5126   Register allocation_site = x2;
   5127   Register kind = x3;
   5128 
   5129   Label normal_sequence;
   5130   if (mode == DONT_OVERRIDE) {
   5131     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   5132     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   5133     STATIC_ASSERT(FAST_ELEMENTS == 2);
   5134     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   5135     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
   5136     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
   5137 
   5138     // Is the low bit set? If so, the array is holey.
   5139     __ Tbnz(kind, 0, &normal_sequence);
   5140   }
   5141 
   5142   // Look at the last argument.
   5143   // TODO(jbramley): What does a 0 argument represent?
   5144   __ Peek(x10, 0);
   5145   __ Cbz(x10, &normal_sequence);
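          // (A Smi zero here is a zero-length request: the resulting array is
          // empty, so the packed 'normal sequence' stub suffices and no holey
          // transition is needed.)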
   5146 
   5147   if (mode == DISABLE_ALLOCATION_SITES) {
   5148     ElementsKind initial = GetInitialFastElementsKind();
   5149     ElementsKind holey_initial = GetHoleyElementsKind(initial);
   5150 
   5151     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
   5152                                                   holey_initial,
   5153                                                   DISABLE_ALLOCATION_SITES);
   5154     __ TailCallStub(&stub_holey);
   5155 
   5156     __ Bind(&normal_sequence);
   5157     ArraySingleArgumentConstructorStub stub(masm->isolate(),
   5158                                             initial,
   5159                                             DISABLE_ALLOCATION_SITES);
   5160     __ TailCallStub(&stub);
   5161   } else if (mode == DONT_OVERRIDE) {
   5162     // We are going to create a holey array, but our kind is non-holey.
   5163     // Fix kind and retry (only if we have an allocation site in the slot).
   5164     __ Orr(kind, kind, 1);
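            // Given the kind numbering STATIC_ASSERTed above, setting the low
            // bit turns any packed fast kind into its holey counterpart, e.g.
            // FAST_ELEMENTS (2) -> FAST_HOLEY_ELEMENTS (3).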
   5165 
   5166     if (FLAG_debug_code) {
   5167       __ Ldr(x10, FieldMemOperand(allocation_site, 0));
   5168       __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
   5169                        &normal_sequence);
   5170       __ Assert(eq, kExpectedAllocationSite);
   5171     }
   5172 
   5173     // Save the resulting elements kind in type info. We can't just store 'kind'
   5174     // in the AllocationSite::transition_info field because elements kind is
   5175     // restricted to a portion of the field; upper bits need to be left alone.
   5176     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   5177     __ Ldr(x11, FieldMemOperand(allocation_site,
   5178                                 AllocationSite::kTransitionInfoOffset));
   5179     __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
   5180     __ Str(x11, FieldMemOperand(allocation_site,
   5181                                 AllocationSite::kTransitionInfoOffset));
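            // Example: transition_info holding FAST_SMI_ELEMENTS (0) in its
            // ElementsKindBits ends up holding FAST_HOLEY_SMI_ELEMENTS (1);
            // adding the Smi-tagged delta leaves the upper bits untouched.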
   5182 
   5183     __ Bind(&normal_sequence);
   5184     int last_index =
   5185         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   5186     for (int i = 0; i <= last_index; ++i) {
   5187       Label next;
   5188       ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
   5189       __ CompareAndBranch(kind, candidate_kind, ne, &next);
   5190       ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
   5191       __ TailCallStub(&stub);
   5192       __ Bind(&next);
   5193     }
   5194 
   5195     // If we reached this point there is a problem.
   5196     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   5197   } else {
   5198     UNREACHABLE();
   5199   }
   5200 }
   5201 
   5202 
   5203 template<class T>
   5204 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
   5205   int to_index = GetSequenceIndexFromFastElementsKind(
   5206       TERMINAL_FAST_ELEMENTS_KIND);
   5207   for (int i = 0; i <= to_index; ++i) {
   5208     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   5209     T stub(isolate, kind);
   5210     stub.GetCode();
   5211     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
   5212       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
   5213       stub1.GetCode();
   5214     }
   5215   }
   5216 }
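
        // For each fast elements kind, in sequence order, the helper above
        // pre-compiles the default stub, plus a DISABLE_ALLOCATION_SITES
        // variant whenever AllocationSite::GetMode says the kind is tracked.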
   5217 
   5218 
   5219 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
   5220   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
   5221       isolate);
   5222   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
   5223       isolate);
   5224   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
   5225       isolate);
   5226 }
   5227 
   5228 
   5229 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
   5230     Isolate* isolate) {
   5231   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   5232   for (int i = 0; i < 2; i++) {
   5233     // For internal arrays we only need the no-, single- and N-argument stubs.
   5234     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
   5235     stubh1.GetCode();
   5236     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
   5237     stubh2.GetCode();
   5238     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
   5239     stubh3.GetCode();
   5240   }
   5241 }
   5242 
   5243 
   5244 void ArrayConstructorStub::GenerateDispatchToArrayStub(
   5245     MacroAssembler* masm,
   5246     AllocationSiteOverrideMode mode) {
   5247   Register argc = x0;
   5248   if (argument_count_ == ANY) {
   5249     Label zero_case, n_case;
   5250     __ Cbz(argc, &zero_case);
   5251     __ Cmp(argc, 1);
   5252     __ B(ne, &n_case);
   5253 
   5254     // One argument.
   5255     CreateArrayDispatchOneArgument(masm, mode);
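            // (No branch over the next case is needed: the dispatch helpers
            // always end in a tail call or an abort, so control cannot fall
            // through to zero_case.)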
   5256 
   5257     __ Bind(&zero_case);
   5258     // No arguments.
   5259     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   5260 
   5261     __ Bind(&n_case);
   5262     // N arguments.
   5263     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   5264 
   5265   } else if (argument_count_ == NONE) {
   5266     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   5267   } else if (argument_count_ == ONE) {
   5268     CreateArrayDispatchOneArgument(masm, mode);
   5269   } else if (argument_count_ == MORE_THAN_ONE) {
   5270     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   5271   } else {
   5272     UNREACHABLE();
   5273   }
   5274 }
   5275 
   5276 
   5277 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   5278   ASM_LOCATION("ArrayConstructorStub::Generate");
   5279   // ----------- S t a t e -------------
   5280   //  -- x0 : argc (only if argument_count_ == ANY)
   5281   //  -- x1 : constructor
   5282   //  -- x2 : AllocationSite or undefined
   5283   //  -- sp[0] : last argument
   5285   // -----------------------------------
   5286   Register constructor = x1;
   5287   Register allocation_site = x2;
   5288 
   5289   if (FLAG_debug_code) {
   5290     // The array construct code is only set for the global and natives
   5291   // builtin Array functions, which always have maps.
   5292 
   5293     Label unexpected_map, map_ok;
   5294     // Initial map for the builtin Array function should be a map.
   5295     __ Ldr(x10, FieldMemOperand(constructor,
   5296                                 JSFunction::kPrototypeOrInitialMapOffset));
   5297   // A Smi check catches both a NULL pointer and a Smi.
   5298     __ JumpIfSmi(x10, &unexpected_map);
   5299     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
   5300     __ Bind(&unexpected_map);
   5301     __ Abort(kUnexpectedInitialMapForArrayFunction);
   5302     __ Bind(&map_ok);
   5303 
   5304     // We should either have undefined in the allocation_site register or a
   5305     // valid AllocationSite.
   5306     __ AssertUndefinedOrAllocationSite(allocation_site, x10);
   5307   }
   5308 
   5309   Register kind = x3;
   5310   Label no_info;
   5311   // Get the elements kind and case on that.
   5312   __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
   5313 
   5314   __ Ldrsw(kind,
   5315            UntagSmiFieldMemOperand(allocation_site,
   5316                                    AllocationSite::kTransitionInfoOffset));
   5317   __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
   5318   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
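          // The dispatch above never falls through (it tail-calls or aborts),
          // so no_info is only reached via the JumpIfRoot branch.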
   5319 
   5320   __ Bind(&no_info);
   5321   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
   5322 }
   5323 
   5324 
   5325 void InternalArrayConstructorStub::GenerateCase(
   5326     MacroAssembler* masm, ElementsKind kind) {
   5327   Label zero_case, n_case;
   5328   Register argc = x0;
   5329 
   5330   __ Cbz(argc, &zero_case);
   5331   __ CompareAndBranch(argc, 1, ne, &n_case);
   5332 
   5333   // One argument.
   5334   if (IsFastPackedElementsKind(kind)) {
   5335     Label packed_case;
   5336 
   5337     // We might need to create a holey array; look at the first argument.
   5338     __ Peek(x10, 0);
   5339     __ Cbz(x10, &packed_case);
   5340 
   5341     InternalArraySingleArgumentConstructorStub
   5342         stub1_holey(isolate(), GetHoleyElementsKind(kind));
   5343     __ TailCallStub(&stub1_holey);
   5344 
   5345     __ Bind(&packed_case);
   5346   }
   5347   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
   5348   __ TailCallStub(&stub1);
   5349 
   5350   __ Bind(&zero_case);
   5351   // No arguments.
   5352   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   5353   __ TailCallStub(&stub0);
   5354 
   5355   __ Bind(&n_case);
   5356   // N arguments.
   5357   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
   5358   __ TailCallStub(&stubN);
   5359 }
   5360 
   5361 
   5362 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   5363   // ----------- S t a t e -------------
   5364   //  -- x0 : argc
   5365   //  -- x1 : constructor
   5366   //  -- sp[0] : last argument
   5368   // -----------------------------------
   5369 
   5370   Register constructor = x1;
   5371 
   5372   if (FLAG_debug_code) {
   5373     // The array construct code is only set for the global and natives
   5374   // builtin Array functions, which always have maps.
   5375 
   5376     Label unexpected_map, map_ok;
   5377     // Initial map for the builtin Array function should be a map.
   5378     __ Ldr(x10, FieldMemOperand(constructor,
   5379                                 JSFunction::kPrototypeOrInitialMapOffset));
   5380   // A Smi check catches both a NULL pointer and a Smi.
   5381     __ JumpIfSmi(x10, &unexpected_map);
   5382     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
   5383     __ Bind(&unexpected_map);
   5384     __ Abort(kUnexpectedInitialMapForArrayFunction);
   5385     __ Bind(&map_ok);
   5386   }
   5387 
   5388   Register kind = w3;
   5389   // Figure out the right elements kind
   5390   __ Ldr(x10, FieldMemOperand(constructor,
   5391                               JSFunction::kPrototypeOrInitialMapOffset));
   5392 
   5393   // Retrieve elements_kind from map.
   5394   __ LoadElementsKindFromMap(kind, x10);
   5395 
   5396   if (FLAG_debug_code) {
   5397     __ Cmp(kind, FAST_ELEMENTS);
   5398     __ Ccmp(kind, FAST_HOLEY_ELEMENTS, ZFlag, ne);
   5399     __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
   5400   }
   5402 
   5403   Label fast_elements_case;
   5404   __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
   5405   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
   5406 
   5407   __ Bind(&fast_elements_case);
   5408   GenerateCase(masm, FAST_ELEMENTS);
   5409 }
   5410 
   5411 
   5412 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   5413   // ----------- S t a t e -------------
   5414   //  -- x0                  : callee
   5415   //  -- x4                  : call_data
   5416   //  -- x2                  : holder
   5417   //  -- x1                  : api_function_address
   5418   //  -- cp                  : context
   5419   //  --
   5420   //  -- sp[0]               : last argument
   5421   //  -- ...
   5422   //  -- sp[(argc - 1) * 8]  : first argument
   5423   //  -- sp[argc * 8]        : receiver
   5424   // -----------------------------------
   5425 
   5426   Register callee = x0;
   5427   Register call_data = x4;
   5428   Register holder = x2;
   5429   Register api_function_address = x1;
   5430   Register context = cp;
   5431 
   5432   int argc = ArgumentBits::decode(bit_field_);
   5433   bool is_store = IsStoreBits::decode(bit_field_);
   5434   bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
   5435 
   5436   typedef FunctionCallbackArguments FCA;
   5437 
   5438   STATIC_ASSERT(FCA::kContextSaveIndex == 6);
   5439   STATIC_ASSERT(FCA::kCalleeIndex == 5);
   5440   STATIC_ASSERT(FCA::kDataIndex == 4);
   5441   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
   5442   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   5443   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   5444   STATIC_ASSERT(FCA::kHolderIndex == 0);
   5445   STATIC_ASSERT(FCA::kArgsLength == 7);
   5446 
   5447   // FunctionCallbackArguments: context, callee and call data.
   5448   __ Push(context, callee, call_data);
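          // (These three pushes fill the FCA slots asserted above:
          // kContextSaveIndex (6), kCalleeIndex (5) and kDataIndex (4).)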
   5449 
   5450   // Load context from callee
   5451   __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   5452 
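          // If call_data holds a real value, replace it in the register with
          // undefined: the register is pushed twice below to seed the return
          // value and return value default slots, which must start undefined.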
   5453   if (!call_data_undefined) {
   5454     __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
   5455   }
   5456   Register isolate_reg = x5;
   5457   __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
   5458 
   5459   // FunctionCallbackArguments:
   5460   //    return value, return value default, isolate, holder.
   5461   __ Push(call_data, call_data, isolate_reg, holder);
   5462 
   5463   // Prepare arguments.
   5464   Register args = x6;
   5465   __ Mov(args, masm->StackPointer());
   5466 
   5467   // Allocate the FunctionCallbackInfo structure in the arguments' space, since
   5468   // it's not controlled by GC.
   5469   const int kApiStackSpace = 4;
   5470 
   5471   // Allocate space so that CallApiFunctionAndReturn can store some scratch
   5472   // registers on the stack.
   5473   const int kCallApiFunctionSpillSpace = 4;
   5474 
   5475   FrameScope frame_scope(masm, StackFrame::MANUAL);
   5476   __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
   5477 
   5478   ASSERT(!AreAliased(x0, api_function_address));
   5479   // x0 = FunctionCallbackInfo&
   5480   // The arguments structure sits one slot up, past the return-address slot.
   5481   __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
   5482   // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
   5483   __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
   5484   __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
   5485   // FunctionCallbackInfo::length_ = argc and
   5486   // FunctionCallbackInfo::is_construct_call = 0
   5487   __ Mov(x10, argc);
   5488   __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
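          // The two Stp instructions above lay out the FunctionCallbackInfo
          // fields in the reserved stack space, roughly:
          //   implicit_args_     = args;
          //   values_            = args + (kArgsLength - 1 + argc) * kPointerSize;
          //   length_            = argc;
          //   is_construct_call_ = 0;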
   5489 
   5490   const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
   5491   ExternalReference thunk_ref =
   5492       ExternalReference::invoke_function_callback(isolate());
   5493 
   5494   AllowExternalCallThatCantCauseGC scope(masm);
   5495   MemOperand context_restore_operand(
   5496       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   5497   // Stores return their first JS argument.
   5498   int return_value_offset = 0;
   5499   if (is_store) {
   5500     return_value_offset = 2 + FCA::kArgsLength;
   5501   } else {
   5502     return_value_offset = 2 + FCA::kReturnValueOffset;
   5503   }
   5504   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
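          // In both fp-relative offsets above, the '2 +' skips the exit
          // frame's two header slots (saved fp and lr) to reach the words that
          // were pushed before the frame was entered.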
   5505 
   5506   const int spill_offset = 1 + kApiStackSpace;
   5507   __ CallApiFunctionAndReturn(api_function_address,
   5508                               thunk_ref,
   5509                               kStackUnwindSpace,
   5510                               spill_offset,
   5511                               return_value_operand,
   5512                               &context_restore_operand);
   5513 }
   5514 
   5515 
   5516 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   5517   // ----------- S t a t e -------------
   5518   //  -- sp[0]                  : name
   5519   //  -- sp[8 - kArgsLength*8]  : PropertyCallbackArguments object
   5520   //  -- ...
   5521   //  -- x2                     : api_function_address
   5522   // -----------------------------------
   5523 
   5524   Register api_function_address = x2;
   5525 
   5526   __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
   5527   __ Add(x1, x0, 1 * kPointerSize);  // x1 = PCA
   5528 
   5529   const int kApiStackSpace = 1;
   5530 
   5531   // Allocate space so that CallApiFunctionAndReturn can store some scratch
   5532   // registers on the stack.
   5533   const int kCallApiFunctionSpillSpace = 4;
   5534 
   5535   FrameScope frame_scope(masm, StackFrame::MANUAL);
   5536   __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
   5537 
   5538   // Create a PropertyAccessorInfo instance on the stack above the exit frame,
   5539   // with x1 (internal::Object** args_) as the data.
   5540   __ Poke(x1, 1 * kPointerSize);
   5541   __ Add(x1, masm->StackPointer(), 1 * kPointerSize);  // x1 = AccessorInfo&
   5542 
   5543   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
   5544 
   5545   ExternalReference thunk_ref =
   5546       ExternalReference::invoke_accessor_getter_callback(isolate());
   5547 
   5548   const int spill_offset = 1 + kApiStackSpace;
   5549   __ CallApiFunctionAndReturn(api_function_address,
   5550                               thunk_ref,
   5551                               kStackUnwindSpace,
   5552                               spill_offset,
   5553                               MemOperand(fp, 6 * kPointerSize),
   5554                               NULL);
   5555 }
   5556 
   5557 
   5558 #undef __
   5559 
   5560 } }  // namespace v8::internal
   5561 
   5562 #endif  // V8_TARGET_ARCH_ARM64
   5563