// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

     21 // "type" holds an instance type on entry and is not clobbered.
     22 // Generated code branch on "global_object" if type is any kind of global
     23 // JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}
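
// Note on the Cmp/Ccmp chain above: each Ccmp performs its comparison only
// while the flags still read "ne" (no match so far); once a comparison has
// matched, the remaining Ccmps just force the Z flag (ZFlag), so the final
// B(eq) fires if the type matched any of the three instance types. In effect:
//   if (type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE ||
//       type == JS_GLOBAL_PROXY_TYPE) goto global_object;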


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
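
// For reference: the loads above assume that GeneratePositiveLookup leaves
// scratch2 holding 'elements' plus the scaled offset of the matching entry.
// The field offsets imply that each dictionary entry is a (key, value,
// details) triple of tagged words: the value is read at kElementsStartOffset
// + 1 * kPointerSize and the details smi at + 2 * kPointerSize.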


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read-only.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}
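
// Note: kTypeAndReadOnlyMask lets a single Tst reject both failure cases at
// once. The store may only proceed for a NORMAL property (TypeField == 0)
// that is not READ_ONLY, so any set bit under the combined mask sends the
// code to the miss label.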


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  DCHECK(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}
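
// Note: each Tbnz above tests a single bit of the map's bit field, so the
// access-check and interceptor checks cost one instruction apiece, and the
// final B(lt) relies on JS_VALUE_TYPE ordering below JS_OBJECT_TYPE (see the
// STATIC_ASSERT), so value wrappers take the slow path together with all
// other low instance types.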


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array branch is
//                taken. Otherwise, this is used as a scratch register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register elements_map, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* slow) {
  DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}
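
// Note: the bounds check above compares the tagged smi key against the
// tagged smi length with an unsigned branch (hs). A negative smi has its
// sign bit set, so it compares as a huge unsigned value and is rejected by
// the same branch as an out-of-range index.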


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done, the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map_scratch, Register hash_scratch,
                                 Label* index_string, Label* not_unique) {
  DCHECK(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ B(eq, &unique);

  // Is the string an array index with a cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test is
  // enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}
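
// Note: the three-way split above is: strings whose hash field caches an
// array index jump to 'index_string'; symbols and internalized strings fall
// through as unique names; anything else (including non-internalized
// strings) goes to 'not_unique'.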


// Neither 'object' nor 'key' is modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object, Register key,
                                                Register map, Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  DCHECK(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
                      lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);

  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Sub(scratch1, scratch1, Smi::FromInt(2));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
  // The base of the result (scratch2) is passed to RecordWrite in
  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
  return MemOperand(scratch2, scratch1);
}
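
// For reference, the sloppy-arguments parameter map read above is a
// FixedArray laid out as
//   [context, arguments backing store, slot_0, ..., slot_{n-1}]
// which is why the mapped-range check subtracts 2 from the length and the
// per-key probe starts at FixedArray::kHeaderSize + 2 * kPointerSize. A
// non-hole slot holds a smi index into the context, from which the returned
// MemOperand is built.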


// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  DCHECK(!AreAliased(key, parameter_map, scratch));

  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store, backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = x0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  // Perform tail call to the entry.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
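
// Note: in the tail calls above, the '2' is the number of arguments already
// pushed for the callee (receiver and name) and the '1' is the expected
// result size in words.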


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
  Label slow, notin;
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped = GenerateMappedArgumentsLookup(
      masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateUnmappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;  // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret();
  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                        Register receiver, Register scratch1,
                                        Register scratch2, Register scratch3,
                                        Register scratch4, Register scratch5,
                                        Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
                        result, NULL, slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
                      scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
                              scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
                                         Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, cache_keys);
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load the map and make scratch3 point to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ Bind(&load_in_object_property);
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);        // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
                      scratch1, scratch2);
  __ Ret();
}
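
// For reference: each keyed lookup cache bucket holds kEntriesPerBucket
// (map, name) pairs stored contiguously in cache_keys, with the matching
// field indices in the parallel cache_field_offsets array. On a hit, the
// cached index minus the in-object property count selects either an
// in-object field (negative result) or a slot in the property array
// (non-negative result).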


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(x2));
  DCHECK(receiver.is(x1));

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  // Slow case.
  __ Bind(&slow);
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
                      x4, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register result = x0;
  Register scratch = x3;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");

  // Push receiver, key and value for runtime call.
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because there
  // may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}
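
// For reference, the elements-kind transitions handled above form a small
// lattice:
//   FAST_SMI_ELEMENTS    -> FAST_DOUBLE_ELEMENTS  (storing a HeapNumber)
//   FAST_SMI_ELEMENTS    -> FAST_ELEMENTS         (storing another object)
//   FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS         (storing a non-number)
// Each transition reloads 'elements' afterwards, since the backing store may
// have been reallocated by the transition generator.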


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;

  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: receiver
  //  x2: key
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);


  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
                     x5, x6));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
                                               name, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register value = StoreDescriptor::ValueRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register dictionary = x3;
  DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n", address,
           info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64