// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/ic/ic.h"
#include "src/codegen.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ beq(global_object);
  __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ beq(global_object);
}

// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same as elements or name, in
//           which case that register is clobbered on the non-miss path.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
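  // Layout reminder: each NameDictionary entry is a (key, value, details)
  // pointer triple, so relative to the key slot found by the probe the value
  // lives one pointer away and the details word two pointers away.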
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
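  // Stash the entry address in r0 while scratch2 is briefly reused to hold
  // the details mask; it is restored right after the check.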
  __ LoadRR(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ AndP(scratch2, scratch1);
  __ bne(miss);
  __ LoadRR(scratch2, r0);

  // Get the value at the masked, scaled index and return.
  __ LoadP(result,
           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}

// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ LoadRR(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
  __ AndP(scratch2, scratch1);
  __ bne(miss /*, cr0*/);
  __ LoadRR(scratch2, r0);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
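  // Subtracting kHeapObjectTag turns the tagged entry pointer into the raw
  // slot address that the plain MemOperand store and RecordWrite need.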
  __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ StoreP(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ LoadRR(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}

// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
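  // (The DCHECK presumably guards that the combined mask fits in a 16-bit
  // immediate; both flags live in the map's one-byte bit field in any case.)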
  __ mov(r0,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ AndP(r0, scratch);
  __ bne(slow /*, cr0*/);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // If the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ CmpP(scratch, Operand(JS_OBJECT_TYPE));
  __ blt(slow);
}

// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ CmpLogicalP(key, scratch1);
  __ blt(&in_bounds, Label::kNear);
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  __ CmpP(key, Operand::Zero());
  __ blt(slow);  // Negative keys can't take the fast OOB path.
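  // Walk the prototype chain. Returning undefined for an out-of-bounds load
  // is only safe while every prototype is a plain JSObject with empty
  // elements and no access checks or indexed interceptors; anything else
  // bails out to the slow path.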
  __ bind(&check_prototypes);
  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
  __ beq(&absent, Label::kNear);
  __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
  __ blt(slow);
  __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                                (1 << Map::kHasIndexedInterceptor)));
  __ bne(slow);
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ bne(slow);
  __ jmp(&check_next_prototype);

  __ bind(&absent);
  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
  __ jmp(&done);

  __ bind(&in_bounds);
  // Fast case: Do the load.
  __ AddP(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  __ SmiToPtrArrayOffset(scratch2, key);
  __ LoadP(scratch2, MemOperand(scratch2, scratch1));
  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ beq(&check_prototypes);
  __ LoadRR(result, scratch2);
  __ bind(&done);
}

// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ bgt(not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ beq(&unique, Label::kNear);

  // Is the string an array index, with cached numeric value?
  __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
  __ AndP(r0, hash, r7);
  __ beq(index_string);
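  // A string key that is an array index caches the index in its hash field;
  // the caller recovers it at its index_string label via IndexFromHash.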

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tmll(hash, Operand(kIsNotInternalizedMask));
  __ bne(not_unique);

  __ bind(&unique);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r2;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                       JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r2, r5, r6);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}

// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r5; }

static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}

void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in r14.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in r14.

  __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}

void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in r14.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in r14.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in r14.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(r4));
  DCHECK(receiver.is(r3));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r2, r5, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
                      r5);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // r5: elements map
  // r6: elements
  __ CompareRoot(r5, Heap::kHashTableMapRootIndex);
  __ bne(&slow, Label::kNear);
  __ SmiUntag(r2, key);
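  // The untagged key in r2 doubles as the dictionary probe's scratch input
  // and as its result register.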
  __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
  __ Ret();

  // Slow case, key and receiver still in r4 and r3.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
                      r5);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ CompareRoot(r6, Heap::kHashTableMapRootIndex);
  __ beq(&probe_dictionary);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
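  // (The dummy vector is a root object, so loading it needs no relocation;
  // any feedback written to its dummy slot is simply never read back.)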

  Code::Flags flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, r6, r7, r8, r9);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r5: elements
  __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
  // Load the property to r2.
  GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
                      r6, r5);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r5, key);
  // Now jump to the place where smi keys are handled.
  __ b(&index_smi);
}

static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}

void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r6;
  Register address = r7;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ CmpP(elements_map,
            Operand(masm->isolate()->factory()->fixed_array_map()));
    __ bne(fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element
  Label holecheck_passed1;
  // @TODO(joransiu) : Fold AddP into memref of LoadP
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ LoadP(scratch, MemOperand(address, scratch));
  __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
  __ bne(&holecheck_passed1, Label::kNear);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StoreP(value, MemOperand(address, scratch));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StoreP(value, MemOperand(address, scratch));
  __ la(address, MemOperand(address, scratch));
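  // la folds base + scaled index into a single register because RecordWrite
  // needs the slot address in one register.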
  // Update write barrier for the elements array address.
  __ LoadRR(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ bne(slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  // @TODO(joransiu) : Fold AddP Operand into LoadlW
  __ AddP(address, elements,
          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag)));
  __ SmiToDoubleArrayOffset(scratch, key);
  __ LoadlW(scratch, MemOperand(address, scratch));
  __ CmpP(scratch, Operand(kHoleNanUpper32));
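  // A double-array hole is a canonical NaN with a distinguished upper half;
  // comparing just the 32-bit exponent word against kHoleNanUpper32 suffices.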
  __ bne(&fast_double_without_map_check, Label::kNear);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

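  // Elements-kind transitions: the incoming value cannot be stored in the
  // current backing store. The cases below handle FAST_SMI -> FAST_DOUBLE
  // (heap number), FAST_SMI -> FAST (other object) and FAST_DOUBLE -> FAST.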
  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ bne(&non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);
}

void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r2     : value
  //  -- r3     : receiver
  //  -- r4     : key
  //  -- r14    : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r3));
  DCHECK(key.is(r4));
  DCHECK(value.is(r2));
  Register receiver_map = r5;
  Register elements_map = r8;
  Register elements = r9;  // Elements array of the receiver.
  // r6 and r7 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ bne(&slow, Label::kNear);
  // Check if the object is a JS array or not.
  __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ CmpP(r6, Operand(JS_ARRAY_TYPE));
  __ beq(&array);
  // Check that the object is some kind of JSObject.
  __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
  __ blt(&slow, Label::kNear);

  // Object case: Check key against length in the elements array.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ blt(&fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r2: value.
  // r3: receiver.
  // r4: key.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
  __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r6, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  Code::Flags flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::KEYED_STORE_IC, flags, receiver, key, r7, r8, r9, ip);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ bge(&slow);
  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ bne(&check_if_double_array, Label::kNear);
  __ b(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ CmpP(elements_map,
          Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ bne(&slow);
  __ b(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ bge(&extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);
  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}

void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r7;
  DCHECK(receiver.is(r3));
  DCHECK(name.is(r4));
  DCHECK(value.is(r2));
  DCHECK(VectorStoreICDescriptor::VectorRegister().is(r5));
  DCHECK(VectorStoreICDescriptor::SlotRegister().is(r6));

  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
  GenerateMiss(masm);
}

#undef __

Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}

bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a CHI, nothing
  // was inlined.
  return (Instruction::S390OpcodeValue(cmp_instruction_address) == CHI);
}

//
// This code is paired with the JumpPatchSite class in full-codegen-s390.cc
//
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (Instruction::S390OpcodeValue(cmp_instruction_address) != CHI) {
    return;
  }

  if (Instruction::S390OpcodeValue(address) != BRASL) {
    return;
  }
  // The delta to the start of the map check instruction (and the condition
  // code used at the patched jump) is encoded in the low halfword of the CHI.
  int delta = instr & 0x0000ffff;

  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(cmp_instruction_address), delta);
  }

  // Expected sequence to enable by changing the following
  //   CR/CGR  Rx, Rx    // 2 / 4 bytes
  //   LR  R0, R0        // 2 bytes   // 31-bit only!
  //   BRC/BRCL          // 4 / 6 bytes
  // into
  //   TMLL    Rx, XXX   // 4 bytes
  //   BRC/BRCL          // 4 / 6 bytes
  // And vice versa to disable.

  // The following constant is the size of the CR + LR pair (31-bit) or of
  // the single CGR (64-bit): four bytes in either case.
  const int kPatchAreaSizeNoBranch = 4;
  Address patch_address = cmp_instruction_address - delta;
  Address branch_address = patch_address + kPatchAreaSizeNoBranch;

  Instr instr_at_patch = Assembler::instr_at(patch_address);
  SixByteInstr branch_instr = Assembler::instr_at(branch_address);

  // This is patching a conditional "jump if not smi/jump if smi" site.
  size_t patch_size = 0;
  if (Instruction::S390OpcodeValue(branch_address) == BRC) {
    patch_size = kPatchAreaSizeNoBranch + 4;
  } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
    patch_size = kPatchAreaSizeNoBranch + 6;
  } else {
    DCHECK(false);
  }
  CodePatcher patcher(isolate, patch_address, patch_size);
  Register reg;
  reg.reg_code = instr_at_patch & 0xf;
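  // The smi-check site was emitted as CR/CGR reg,reg, so both operand
  // nibbles name the same register and the low nibble recovers it.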
  if (check == ENABLE_INLINED_SMI_CHECK) {
    patcher.masm()->TestIfSmi(reg);
  } else {
    // Re-emit the compare plus a NOP (31-bit only) so the four-byte patch
    // area that held the TMLL stays fully covered.
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    patcher.masm()->CmpP(reg, reg);
#ifndef V8_TARGET_ARCH_S390X
    patcher.masm()->nop();
#endif
  }

  Condition cc = al;
  if (Instruction::S390OpcodeValue(branch_address) == BRC) {
    cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
    DCHECK((cc == ne) || (cc == eq));
    cc = (cc == ne) ? eq : ne;
    patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
  } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
    cc = static_cast<Condition>(
        (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
    DCHECK((cc == ne) || (cc == eq));
    cc = (cc == ne) ? eq : ne;
    patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
  } else {
    DCHECK(false);
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390