// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ j(equal, global_object);
  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
  __ j(equal, global_object);
}


// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
// property load that is complete. This function is safe to call if
// name is not internalized, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
                                   Register elements, Register name,
                                   Register r0, Register r1, Register result) {
  // Register use:
  //
  // elements - holds the property dictionary on entry and is unchanged.
  //
  // name - holds the name of the property on entry and is unchanged.
  //
  // Scratch registers:
  //
  // r0   - used for the index into the property dictionary
  //
  // r1   - used to hold the capacity of the property dictionary.
  //
  // result - holds the result on exit.

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
                                                   elements, name, r0, r1);

  // If probing finds an entry in the dictionary, r0 contains the
  // index into the dictionary. Check that the value is a normal
  // property.
  __ bind(&done);
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
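  // Note: GeneratePositiveLookup leaves r0 holding the found entry's index
  // already scaled by the dictionary entry size (key, value and details
  // words), so the times_4 (kPointerSize) scaling below turns it into a byte
  // offset, and kDetailsOffset skips past the key and value words. The
  // details word is a smi, hence the extra kSmiTagSize shift on the mask.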
  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ j(not_zero, miss_label);

  // Get the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
}


// Helper function used to store a property to a dictionary backing
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
// call if name is not internalized, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
                                    Register elements, Register name,
                                    Register value, Register r0, Register r1) {
  // Register use:
  //
  // elements - holds the property dictionary on entry and is clobbered.
  //
  // name - holds the name of the property on entry and is unchanged.
  //
  // value - holds the value to store and is unchanged.
  //
  // r0 - used for index into the property dictionary and is clobbered.
  //
  // r1 - used to hold the capacity of the property dictionary and is clobbered.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
                                                   elements, name, r0, r1);

  // If probing finds an entry in the dictionary, r0 contains the
  // index into the dictionary. Check that the value is a normal
  // property that is not read only.
  __ bind(&done);
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
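  // The details word is a smi encoding both the property kind and its
  // attributes, so the mask above is shifted by kSmiTagSize. A non-zero
  // test result means the entry is either not a normal data property or is
  // marked READ_ONLY; either way, bail out to the miss label.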
  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
          Immediate(kTypeAndReadOnlyMask));
  __ j(not_zero, miss_label);

  // Store the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
  __ mov(Operand(r0, 0), value);

  // Update write barrier. Make sure not to clobber the value.
  __ mov(r1, value);
  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           int interceptor_bit, Label* slow) {
  // Register use:
  //   receiver - holds the receiver and is unchanged.
  // Scratch registers:
  //   map - used to hold the map of the receiver.

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);

  // Get the map of the receiver.
  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));

  // Check bit field.
  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
  __ j(not_zero, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);

  __ CmpInstanceType(map, JS_OBJECT_TYPE);
  __ j(below, slow);
}


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register scratch,
                                  Register scratch2, Register result,
                                  Label* slow, LanguageMode language_mode) {
  // Register use:
  //   receiver - holds the receiver and is unchanged.
  //   key - holds the key and is unchanged (must be a smi).
  // Scratch registers:
  //   scratch - used to hold elements of the receiver and the loaded value.
  //   scratch2 - holds maps and prototypes during prototype chain check.
  //   result - holds the result on exit if the load succeeds and
  //            we fall through.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(scratch);

  // Check that the key (index) is within bounds.
  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
  __ j(below, &in_bounds);
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  __ cmp(key, 0);
  __ j(less, slow);  // Negative keys can't take the fast OOB path.
  __ bind(&check_prototypes);
  __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ cmp(scratch2, masm->isolate()->factory()->null_value());
  __ j(equal, &absent);
  __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
  __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
  // scratch: elements of current prototype
  // scratch2: map of current prototype
  __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
  __ j(below, slow);
  __ test_b(
      FieldOperand(scratch2, Map::kBitFieldOffset),
      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
  __ j(not_zero, slow);
  __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
  __ j(not_equal, slow);
  __ jmp(&check_next_prototype);

  __ bind(&absent);
  if (is_strong(language_mode)) {
    // Strong mode accesses must throw in this case, so call the runtime.
    __ jmp(slow);
  } else {
    __ mov(result, masm->isolate()->factory()->undefined_value());
    __ jmp(&done);
  }

  __ bind(&in_bounds);
  // Fast case: Do the load.
  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
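  // The key is a smi, i.e. the element index shifted left by kSmiTagSize (1),
  // so scaling it by times_2 yields index * 4 == index * kPointerSize, the
  // byte offset of the element within the FixedArray.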
  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ j(equal, &check_prototypes);
  __ Move(result, scratch);
  __ bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // Register use:
  //   key - holds the key and is unchanged. Assumed to be non-smi.
  // Scratch registers:
  //   map - used to hold the map of the key.
  //   hash - used to hold the hash of the key.
  Label unique;
  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
  __ j(above, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ j(equal, &unique);

  // Is the string an array index, with cached numeric value?
  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
  __ j(zero, index_string);

  // Is the string internalized? We already know it's a string so a single
  // bit test is enough.
  STATIC_ASSERT(kNotInternalizedTag != 0);
  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
            kIsNotInternalizedMask);
  __ j(not_zero, not_unique);

  __ bind(&unique);
}


void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                      LanguageMode language_mode) {
  // The return address is on the stack.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register key = LoadDescriptor::NameRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(eax, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
                        language_mode);
  Isolate* isolate = masm->isolate();
  Counters* counters = isolate->counters();
  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
  __ ret(0);

  __ bind(&check_number_dictionary);
  __ mov(ebx, key);
  __ SmiUntag(ebx);
  __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));

  // Check whether the elements object is a number dictionary.
  // ebx: untagged index
  // eax: elements
  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
              DONT_DO_SMI_CHECK);
  Label slow_pop_receiver;
  // Push receiver on the stack to free up a register for the dictionary
  // probing.
  __ push(receiver);
  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
  // Pop receiver before returning.
  __ pop(receiver);
  __ ret(0);

  __ bind(&slow_pop_receiver);
  // Pop the receiver from the stack and jump to runtime.
  __ pop(receiver);

  __ bind(&slow);
  // Slow case: jump to runtime.
  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
  GenerateRuntimeGetProperty(masm, language_mode);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
                                 &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
         Immediate(isolate->factory()->hash_table_map()));
  __ j(equal, &probe_dictionary);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(isolate);
  int slot = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ push(Immediate(Smi::FromInt(slot)));
  __ push(Immediate(dummy_vector));
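  // Pushing the slot and vector keeps them alive across the stub cache probe
  // below, which uses ebx and edi as scratch; on the fall-through (miss) path
  // they are popped back into the descriptor registers.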

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, ebx, edi);

  __ pop(LoadWithVectorDescriptor::VectorRegister());
  __ pop(LoadDescriptor::SlotRegister());

  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);

  __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);

  GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
  __ ret(0);

  __ bind(&index_name);
  __ IndexFromHash(ebx, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  // key is a smi.
  // ebx: FixedArray receiver->elements
  // edi: receiver map
  // Fast case: Do the store; the value could be either an Object or a double.
  __ bind(fast_object);
  if (check_map == kCheckMap) {
    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ cmp(FixedArrayElementOperand(ebx, key),
         masm->isolate()->factory()->the_hole_value());
  __ j(not_equal, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  // It's irrelevant whether the array is smi-only or not when writing a smi.
  __ mov(FixedArrayElementOperand(ebx, key), value);
  __ ret(0);

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
  __ CheckFastObjectElements(edi, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  __ mov(FixedArrayElementOperand(ebx, key), value);
  // Update write barrier for the elements array address.
  __ mov(edx, value);  // Preserve the value which is returned.
  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ ret(0);

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
    __ j(not_equal, slow);
    // If the value is a number, store it as a double in the FastDoubleElements
    // array.
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
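  // A hole in a FixedDoubleArray is a NaN with a fixed bit pattern, so
  // comparing just its upper 32 bits (kHoleNanUpper32) identifies it. The
  // offset skips the lower word (sizeof(kHoleNanLower32) == 4), and scaling
  // the smi key by times_4 gives index * 8, the double element stride.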
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(not_equal, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(1)));
  }
  __ ret(0);

  __ bind(&transition_smi_elements);
  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));

  // Transition the array appropriately depending on the value type.
  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
              &non_double_value, DONT_DO_SMI_CHECK);

  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
  // and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
                                         edi, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         ebx, edi, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
                                                      value, ebx, mode, slow);
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // Return address is on the stack.
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map from the receiver.
  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
  __ j(not_zero, &slow);
  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
  __ j(equal, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
  __ j(below, &slow);

  // Object case: Check key against length in the elements array.
  // Key is a smi.
  // edi: receiver map
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ j(below, &fast_object);

  // Slow case: call runtime.
  __ bind(&slow);
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns here.

  __ bind(&maybe_name_key);
  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ push(Immediate(Smi::FromInt(slot)));
  __ push(Immediate(dummy_vector));
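  // As with the keyed load above, the slot and vector are kept on the stack
  // across the stub cache probe and popped back into their descriptor
  // registers on the fall-through (miss) path.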

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, key, edi, no_reg);

  __ pop(VectorStoreICDescriptor::VectorRegister());
  __ pop(VectorStoreICDescriptor::SlotRegister());

  // Cache miss.
  __ jmp(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // receiver is a JSArray.
  // key is a smi.
  // ebx: receiver->elements, a FixedArray
  // edi: receiver map
  // flags: compare (key, receiver.length())
  // do not leave holes in the array:
  __ j(not_equal, &slow);
  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ j(above_equal, &slow);
  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
  __ j(not_equal, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
  __ j(not_equal, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  // receiver is a JSArray.
  // key is a smi.
  // edi: receiver map
  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array and fall through to the
  // common store code.
  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
  __ j(above_equal, &extra);

  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
                                      kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength);

  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
  Register dictionary = eax;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
                                  JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), edi, ebx, eax);
  __ ret(0);

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm, language_mode);
}


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();

  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
         !edi.is(vector));

  __ pop(edi);
  __ push(receiver);
  __ push(name);
  __ push(slot);
  __ push(vector);
  __ push(edi);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // Return address is on the stack.
  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                        LanguageMode language_mode) {
  // Return address is on the stack.
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  DCHECK(!ebx.is(receiver) && !ebx.is(name));

  __ pop(ebx);
  __ push(receiver);
  __ push(name);
  __ push(ebx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
                                              : Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // Return address is on the stack.
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                             LanguageMode language_mode) {
  // Return address is on the stack.
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  DCHECK(!ebx.is(receiver) && !ebx.is(name));

  __ pop(ebx);
  __ push(receiver);
  __ push(name);
  __ push(ebx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
                                              : Runtime::kKeyedGetProperty);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // This shouldn't be called.
  // TODO(mvstanton): remove this method.
  __ int3();
  return;
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();
  Register vector = VectorStoreICDescriptor::VectorRegister();

  __ xchg(receiver, Operand(esp, 0));
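  // The xchg swaps the receiver with the return address on top of the stack:
  // the receiver takes the return-address slot, and the return address ends
  // up in the receiver register, which is re-pushed last below so it is back
  // on top of the stack for the tail call that follows.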
  __ push(name);
  __ push(value);
  __ push(slot);
  __ push(vector);
  __ push(receiver);  // Contains the return address.
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // Return address is on the stack.
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label restore_miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();

  // A lot of registers are needed for storing to slow case objects. Push and
  // restore the receiver, vector and slot, but rely on GenerateDictionaryStore
  // preserving the value and name.
  __ push(receiver);
  __ push(vector);
  __ push(slot);

  Register dictionary = ebx;
  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
                          receiver, edi);
  __ Drop(3);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1);
  __ ret(0);

  __ bind(&restore_miss);
  __ pop(slot);
  __ pop(vector);
  __ pop(receiver);
  __ IncrementCounter(counters->store_normal_miss(), 1);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Return address is on the stack.
  StoreIC_PushArgs(masm);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a test al, nothing
  // was inlined.
  return *test_instruction_address == Assembler::kTestAlByte;
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a test al, nothing
  // was inlined.
  if (*test_instruction_address != Assembler::kTestAlByte) {
    DCHECK(*test_instruction_address == Assembler::kNopByte);
    return;
  }

  Address delta_address = test_instruction_address + 1;
  // The byte after the test instruction encodes the delta back to the short
  // conditional jump (emitted as part of the inlined map check) whose
  // condition code is patched below.
  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
           test_instruction_address, delta);
  }

  // Patch with a short conditional jump. Enabling means switching from a short
  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
  // reverse operation of that.
  Address jmp_address = test_instruction_address - delta;
  DCHECK((check == ENABLE_INLINED_SMI_CHECK)
             ? (*jmp_address == Assembler::kJncShortOpcode ||
                *jmp_address == Assembler::kJcShortOpcode)
             : (*jmp_address == Assembler::kJnzShortOpcode ||
                *jmp_address == Assembler::kJzShortOpcode));
  Condition cc =
      (check == ENABLE_INLINED_SMI_CHECK)
          ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
          : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32