// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/assembler-arm64.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


// "type" holds an instance type on entry and is not clobbered.
// The generated code branches to "global_object" if the type is any kind of
// global JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
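  // The conditional compares chain three equality tests without branching:
  // each Ccmp performs its comparison only if the previous test failed ('ne')
  // and otherwise forces the Z flag, so the final 'eq' branch is taken if the
  // type matched any of the three global object types.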
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
//
// "receiver" holds the receiver on entry and is unchanged.
// "elements" holds the property dictionary on fall through.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  // Let t be the object instance type; we want:
  //   FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
  // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
  // check the lower bound.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
                      miss, lt);

  // scratch0 now contains the map of the receiver and scratch1 the object type.
  Register map = scratch0;
  Register type = scratch1;

  // Check if the receiver is a global JS object.
  GenerateGlobalInstanceTypeCheck(masm, type, miss);

  // Check that the object does not require access checks.
  __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
  __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);

  // Check that the properties dictionary is valid.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
}


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
  ASSERT(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

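  // A NameDictionary entry is the triple (key, value, details), so the details
  // word lives two pointers past the entry start and the value one pointer
  // past it. On the hit path, GeneratePositiveLookup leaves scratch2 holding
  // elements plus the scaled index of the matching entry, which is why the
  // FieldMemOperands below are based on scratch2.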
  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
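  // The NORMAL property type is encoded as zero, so a single Tst against this
  // combined mask rejects entries that are either not plain data properties
  // or are marked READ_ONLY.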
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  ASSERT(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array branch is
//                taken. Otherwise, this is used as a scratch register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register elements_map,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* slow) {
  ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is taken, the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map_scratch,
                                 Register hash_scratch,
                                 Label* index_string,
                                 Label* not_unique) {
  ASSERT(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
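  // The flags from the type comparison are still live here: 'eq' means the
  // instance type is exactly LAST_UNIQUE_NAME_TYPE, i.e. a symbol, which is a
  // unique name by definition.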
  __ B(eq, &unique);

  // Is the string an array index with cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch,
                             Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test is
  // enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}


// Neither 'object' nor 'key' is modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register map,
                                                Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  ASSERT(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
                      slow_case, lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);
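  // A negative smi keeps the sign bit of its value in bit 63 of the tagged
  // word, so a single Tbnz on kXSignBit rejects negative keys.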

  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
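  // The first two elements of the parameter map hold the context and the
  // arguments backing store, so only length - 2 entries can be mapped
  // parameters.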
  __ Sub(scratch1, scratch1, Smi::FromInt(2));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
  // The base of the result (scratch2) is passed to RecordWrite in
  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
  return MemOperand(scratch2, scratch1);
}


// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  ASSERT(!AreAliased(key, parameter_map, scratch));

  // The element is in the arguments backing store, which is referenced by
  // the second element of the parameter map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
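  // kBackingStoreOffset addresses element 1 of the parameter map; element 0
  // holds the context.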
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(
      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store,
         backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x0, x2, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------
  Label miss, slow;

  GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);

  // x1 now holds the property dictionary.
  GenerateDictionaryLoad(masm, &slow, x1, x2, x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm);

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  // Perform tail call to the entry.
  __ Push(x0, x2);
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------

  __ Push(x0, x2);
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : key
  //  -- x1     : receiver
  // -----------------------------------
  Register result = x0;
  Register key = x0;
  Register receiver = x1;
  Label miss, unmapped;

  Register map_scratch = x2;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
  __ Ldr(result, mapped_location);
  __ Ret();

  __ Bind(&unmapped);
  // The parameter map is left in map_scratch when the jump to 'unmapped' is
  // taken.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
  __ Ldr(x2, unmapped_location);
  __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
  // Move the result to x0. x0 must be preserved on miss.
  __ Mov(result, x2);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : value
  //  -- x1     : key
  //  -- x2     : receiver
  // -----------------------------------

  Label slow, notin;

  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped =
      GenerateMappedArgumentsLookup(masm, receiver, key, map,
                                    mapped1, mapped2,
                                    &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
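  // RecordWrite needs the address of the slot that was written, so the base
  // and offset of the MemOperand are kept and recombined into a plain address
  // (x10) after the store below.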
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;   // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11,
                 kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : key
  //  -- x1     : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(x1, x0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : key
  //  -- x1     : receiver
  // -----------------------------------
  Register key = x0;
  Register receiver = x1;

  __ Push(receiver, key);
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
                                        Register key,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Register scratch4,
                                        Register scratch5,
                                        Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
  __ IncrementCounter(
      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(
      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
                                         Register key,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         Register scratch5,
                                         Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, cache_keys);
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
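  // Each cache entry is a (map, name) pair occupying two pointers, hence the
  // bucket index is scaled by 2 * kPointerSize.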

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load the map and advance scratch3 to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name.
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ Bind(&load_in_object_property);
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, scratch1, scratch2);
  __ Ret();
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : key
  //  -- x1     : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name;

  Register key = x0;
  Register receiver = x1;

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  // Slow case, key and receiver still in x0 and x1.
  __ Bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : key (index)
  //  -- x1     : receiver
  // -----------------------------------
  Label miss;

  Register index = x0;
  Register receiver = x1;
  Register result = x0;
  Register scratch = x3;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : key
  //  -- x1     : receiver
  // -----------------------------------
  Label slow;
  Register key = x0;
  Register receiver = x1;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is, a uint32.
  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);

  // Get the map of the receiver.
  Register map = x2;
  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
  ASSERT(kSlowCaseBitFieldMask ==
      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
  __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
  __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2,
      1);

  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
  // ---------- S t a t e --------------
  //  -- x0     : value
  //  -- x1     : key
  //  -- x2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- x0     : value
  //  -- x1     : key
  //  -- x2     : receiver
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
  // ---------- S t a t e --------------
  //  -- x0     : value
  //  -- x1     : key
  //  -- x2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  // Push PropertyAttributes(NONE) and strict_mode for runtime call.
  STATIC_ASSERT(NONE == 0);
  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(xzr, x10);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  ASSERT(!AreAliased(
      value, key, receiver, receiver_map, elements_map, elements, x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because there
  // may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
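  // Operand::UntagSmiAndScale folds the smi-to-integer conversion and the
  // pointer-size scaling of the key into one operand, so x10 now holds the
  // address of elements[key].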
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ Bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 x10,
                 kLRHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,
                                 x10,
                                 d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is not
  // a HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
  // ---------- S t a t e --------------
  //  -- x0     : value
  //  -- x1     : key
  //  -- x2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;

  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: key
  //  x2: receiver
  GenerateRuntimeSetProperty(masm, strict_mode);


  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x1, x2, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(x1, x2, x0);

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;
  Register value = x0;
  Register receiver = x1;
  Register name = x2;
  Register dictionary = x3;

  GenerateNameDictionaryReceiverCheck(
      masm, receiver, dictionary, x4, x5, &miss);

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(x1, x2, x0);

  __ Mov(x11, Smi::FromInt(NONE));  // PropertyAttributes
  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(x11, x10);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x0     : value
  //  -- x1     : receiver
  //  -- x2     : name
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, name and value for runtime call.
  __ Push(x1, x2, x0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address =
      Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address =
      Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n",
           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  ASSERT(to_patch->IsTestBranch());
  ASSERT(to_patch->ImmTestBranchBit5() == 0);
  ASSERT(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);
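  // Because the smi tag is bit 0 and equals zero, testing bit 0 of a value is
  // enough to distinguish smis from heap objects; the patched tbz/tbnz below
  // does exactly that.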

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    ASSERT(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
    ASSERT(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64