// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "codegen.h"
#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  // Register usage:
  //   receiver: holds the receiver on entry and is unchanged.
  //   elements: holds the property dictionary on fall through.
  // Scratch registers:
  //   scratch0: used to hold the receiver map.
  //   scratch1: used to hold the receiver instance type, receiver bit field
  //     and the elements map.

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  __ GetObjectType(receiver, scratch0, scratch1);
  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If this assert fails, we have to check the upper bound too.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);

  // Check that the receiver does not require access checks and does not
  // have named interceptors.
  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                           (1 << Map::kHasNamedInterceptor)));
  __ Branch(miss, ne, scratch1, Operand(zero_reg));

  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
  __ Branch(miss, ne, scratch1, Operand(scratch0));
}


// Helper function used from LoadIC/CallIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label
//           is done.
// result:   Register for the result. It is only updated if a jump to the
//           miss label is not done. It can be the same as elements or name,
//           clobbering one of these when the miss label is not taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned by NameDictionaryLookupStub::GeneratePositiveLookup()
// in scratch2 is used.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
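  // Each NameDictionary entry is a (key, value, details) triplet, so the
  // details word lives two pointers past the entry start and the value one
  // pointer past it; the loads below rely on that layout.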
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at,
         scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label
//           is done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned by NameDictionaryLookupStub::GeneratePositiveLookup()
// in scratch2 is used.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read-only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
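  // The details word is stored as a smi, hence the extra kSmiTagSize shift:
  // the test below fails (non-zero) if either the property type is not
  // NORMAL or the READ_ONLY attribute bit is set, and either case goes to
  // the miss label.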
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
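  // Subtracting kHeapObjectTag turns the field offset into an untagged
  // address, so the plain MemOperand store and RecordWrite below can use
  // scratch2 directly.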
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for a regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, the elements map check is not performed.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold the elements map and the elements length.
  //            Holds the elements map if the not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));

  // Fast case: Do the load.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
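  // A smi key is the index shifted left by kSmiTagSize, so shifting it left
  // by (kPointerSizeLog2 - kSmiTagSize) yields index * kPointerSize, the
  // byte offset of the element.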
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(at, at, scratch1);
  __ lw(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole, we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
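  // On equality the key is a symbol, the only non-string unique name type;
  // strictly below LAST_UNIQUE_NAME_TYPE the key is a string and still needs
  // the index and internalization checks below.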
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);

// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                               int argc,
                                               Code::Kind kind,
                                               ExtraICState extra_state) {
  // ----------- S t a t e -------------
  //  -- a1    : receiver
  //  -- a2    : name
  // -----------------------------------
  Label number, non_number, non_string, boolean, probe, miss;

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(kind,
                                         MONOMORPHIC,
                                         extra_state,
                                         Code::NORMAL,
                                         argc);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);

  // If the stub cache probing failed, the receiver might be a value.
  // For value objects, we use the map of the prototype object of the
  // corresponding JSValue for the cache, and that is what we need to
  // probe.
  //
  // Check for number.
  __ JumpIfSmi(a1, &number, t1);
  __ GetObjectType(a1, a3, a3);
  __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
  __ bind(&number);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::NUMBER_FUNCTION_INDEX, a1);
  __ Branch(&probe);

  // Check for string.
  __ bind(&non_number);
  __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::STRING_FUNCTION_INDEX, a1);
  __ Branch(&probe);

  // Check for boolean.
  __ bind(&non_string);
  __ LoadRoot(t0, Heap::kTrueValueRootIndex);
  __ Branch(&boolean, eq, a1, Operand(t0));
  __ LoadRoot(t1, Heap::kFalseValueRootIndex);
  __ Branch(&miss, ne, a1, Operand(t1));
  __ bind(&boolean);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::BOOLEAN_FUNCTION_INDEX, a1);

  // Probe the stub cache for the value object.
  __ bind(&probe);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);

  __ bind(&miss);
}


static void GenerateFunctionTailCall(MacroAssembler* masm,
                                     int argc,
                                     Label* miss,
                                     Register scratch) {
  // a1: function

  // Check that the value isn't a smi.
  __ JumpIfSmi(a1, miss);

  // Check that the value is a JSFunction.
  __ GetObjectType(a1, scratch, scratch);
  __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));

  // Invoke the function.
  ParameterCount actual(argc);
  __ InvokeFunction(a1, actual, JUMP_FUNCTION,
                    NullCallWrapper(), CALL_AS_METHOD);
}


void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  // Get the receiver of the function from the stack into a1.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));

  GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);

  // a0: elements
  // Search the dictionary - put result in register a1.
  GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);

  GenerateFunctionTailCall(masm, argc, &miss, t0);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
}


void CallICBase::GenerateMiss(MacroAssembler* masm,
                              int argc,
                              IC::UtilityId id,
                              ExtraICState extra_state) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  if (id == IC::kCallIC_Miss) {
    __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
  } else {
    __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
  }

  // Get the receiver of the function from the stack.
  __ lw(a3, MemOperand(sp, argc * kPointerSize));

  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Push the receiver and the name of the function.
    __ Push(a3, a2);

    // Call the entry.
    __ PrepareCEntryArgs(2);
    __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));

    CEntryStub stub(1);
    __ CallStub(&stub);

    // Move result to a1 and leave the internal frame.
    __ mov(a1, v0);
  }

  // Check if the receiver is a global object of some sort.
  // This can happen only for regular CallIC but not KeyedCallIC.
  if (id == IC::kCallIC_Miss) {
    Label invoke, global;
    __ lw(a2, MemOperand(sp, argc * kPointerSize));
    __ JumpIfSmi(a2, &invoke);
    __ GetObjectType(a2, a3, a3);
    __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
    __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));

    // Patch the receiver on the stack.
    __ bind(&global);
    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
    __ sw(a2, MemOperand(sp, argc * kPointerSize));
    __ bind(&invoke);
  }
  // Invoke the function.
  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  ParameterCount actual(argc);
  __ InvokeFunction(a1,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    call_kind);
}


void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                 int argc,
                                 ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack into a1.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));
  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
  GenerateMiss(masm, argc, extra_ic_state);
}


void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack into a1.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));

  Label do_call, slow_call, slow_load, slow_reload_receiver;
  Label check_number_dictionary, check_name, lookup_monomorphic_cache;
  Label index_smi, index_name;

  // Check that the key is a smi.
  __ JumpIfNotSmi(a2, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);

  GenerateFastArrayLoad(
      masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);

  __ bind(&do_call);
  // The receiver in a1 is not used after this point.
  // a2: key
  // a1: function

  GenerateFunctionTailCall(masm, argc, &slow_call, a0);

  __ bind(&check_number_dictionary);
  // a2: key
  // a3: elements map
  // t0: elements pointer
  // Check whether the elements object is a number dictionary.
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow_load, ne, a3, Operand(at));
  __ sra(a0, a2, kSmiTagSize);
  // a0: untagged index
  __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
  __ jmp(&do_call);

  __ bind(&slow_load);
  // This branch is taken when calling KeyedCallIC_Miss is neither required
  // nor beneficial.
  __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a2, a1, a2);  // Save the key and pass the receiver and the key.
    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
    __ pop(a2);  // Restore the key.
  }
  __ mov(a1, v0);
  __ jmp(&do_call);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);

  // The key is known to be a unique name.
  // If the receiver is a regular JS object with slow properties then do
  // a quick inline probe of the receiver's dictionary.
  // Otherwise do the monomorphic cache probe.
  GenerateKeyedLoadReceiverCheck(
      masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);

  __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));

  GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
  __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
  __ jmp(&do_call);

  __ bind(&lookup_monomorphic_cache);
  __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
  GenerateMonomorphicCacheProbe(masm,
                                argc,
                                Code::KEYED_CALL_IC,
                                kNoExtraICState);
  // Fall through on miss.

  __ bind(&slow_call);
  // This branch is taken if:
  // - the receiver requires boxing or an access check,
  // - the key is neither a smi nor a unique name,
  // - the value loaded is not a function,
  // - there is hope that the runtime will create a monomorphic call stub,
  //   that will get fetched next time.
  __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
  GenerateMiss(masm, argc);

  __ bind(&index_name);
  __ IndexFromHash(a3, a2);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Check if the name is really a name.
  Label miss;
  __ JumpIfSmi(a2, &miss);
  __ IsObjectNameType(a2, a0, &miss);

  CallICBase::GenerateNormal(masm, argc);
  __ bind(&miss);
  GenerateMiss(masm, argc);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(
      Code::HANDLER, MONOMORPHIC, kNoExtraICState,
      Code::NORMAL, Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a0, a2, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);

  // a1: elements
  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);

  __ mov(a3, a0);
  __ Push(a3, a2);

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------

  __ mov(a3, a0);
  __ Push(a3, a2);

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
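  // The mask 0x80000001 tests the smi tag bit and the sign bit at once, so
  // a single And rejects both non-smis and negative smi keys.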
  __ And(scratch1, key, Operand(0x80000001));
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

  // Load the elements into scratch1 and check its map.
  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1,
              scratch2,
              Heap::kNonStrictArgumentsElementsMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  // Check if the element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));

  // Load the element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

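  // A smi key is index * 2, so multiplying it by (kPointerSize >> 1)
  // produces index * kPointerSize without untagging first.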
  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, key, scratch3);
  __ Addu(scratch3, scratch3, Operand(kOffset));

  __ Addu(scratch2, scratch1, scratch3);
  __ lw(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load the value from the context and return it. We can reuse scratch1
  // because we do not jump to the unmapped lookup (which requires the
  // parameter map in scratch1).
  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, scratch2, scratch3);
  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // The element is in the arguments backing store, which is referenced by
  // the second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ li(scratch, Operand(kPointerSize >> 1));
  __ Mul(scratch, key, scratch);
  __ Addu(scratch,
          scratch,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, mapped_location);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a2.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
  __ lw(a2, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a2, Operand(a3));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a2);
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, notin;
  // The store address is returned in the register of the MemOperand
  // mapped_location.
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
  __ sw(a0, mapped_location);
  __ mov(t5, a0);
  ASSERT_EQ(mapped_location.offset(), 0);
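  // RecordWrite takes the address as a plain register (mapped_location.rm()),
  // so the MemOperand must carry a zero offset; the ASSERT_EQ above guards
  // that assumption.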
  __ RecordWrite(a3, mapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // The store address is returned in the register of the MemOperand
  // unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
  __ sw(a0, unmapped_location);
  __ mov(t5, a0);
  ASSERT_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
                                             int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label slow, notin;
  // Load the receiver.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
  __ lw(a1, mapped_location);
  GenerateFunctionTailCall(masm, argc, &slow, a3);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
  __ lw(a1, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a1, Operand(a3));
  GenerateFunctionTailCall(masm, argc, &slow, a3);
  __ bind(&slow);
  GenerateMiss(masm, argc);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);

  __ Push(a1, a0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------

  __ Push(a1, a0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = a0;
  Register receiver = a1;

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a2, a3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);

  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a0: key
  // a3: elements map
  // t0: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ sra(a2, a0, kSmiTagSize);
  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
  __ Ret();

  // Slow case, key and receiver still in a0 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1,
                      a2,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
  __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
  __ sra(at, t0, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));
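  // Each cache bucket holds kEntriesPerBucket (map, name) key pairs, with
  // the matching field offsets kept in a separate parallel table; that is
  // why the index is scaled by 2 * kPointerSize when probing the keys below.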

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(t0, Operand(cache_keys));
  __ sll(at, a3, kPointerSizeLog2 + 1);
  __ addu(t0, t0, at);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a2, Operand(t1));
    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
    __ bind(&try_next_entry);
  }

  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a2, Operand(t1));
  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, a0, Operand(t1));

  // Get the field offset.
  // a0     : key
  // a1     : receiver
  // a2     : receiver's map
  // a3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(t0, Operand(cache_field_offsets));
    __ sll(at, a3, kPointerSizeLog2);
    __ addu(at, t0, at);
    __ lw(t1, MemOperand(at, kPointerSize * i));
    __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
    __ Subu(t1, t1, t2);
    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load the in-object property.
  __ bind(&load_in_object_property);
  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
  __ addu(t2, t2, t1);  // Index from start of object.
  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ sll(at, t2, kPointerSizeLog2);
  __ addu(at, a1, at);
  __ lw(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a2,
                      a3);
  __ Ret();

  // Load the property array property.
  __ bind(&property_array_property);
  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(t0, t1, kPointerSizeLog2);
  __ Addu(t0, t0, a1);
  __ lw(v0, MemOperand(t0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a2,
                      a3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a1: receiver
  // a0: key
  // a3: elements
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
  // Load the property into v0.
  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1,
                      a2,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key (index)
  //  -- a1     : receiver
  // -----------------------------------
  Label miss;

  Register receiver = a1;
  Register index = a0;
  Register scratch = a3;
  Register result = v0;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a2, a1, a0);
  __ li(a1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes.
  __ li(a0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
  __ Push(a1, a0);
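  // Five arguments are now on the stack: receiver, key, value, attributes
  // and strict mode, matching the 5 passed to kSetProperty below.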

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = t0;
  Register address = t1;
  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether the array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to the elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  // Update the write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 scratch_value,
                 kRAHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to
    // the runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
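  // The hole in a double array is a canonical NaN; the offset below skips
  // sizeof(kHoleNanLower32) bytes so the lw reads the upper 32 bits of the
  // element (little-endian word order assumed), which are then compared
  // against kHoleNanUpper32.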
  __ Addu(address, elements,
          Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
                  - kHeapObjectTag));
  __ sll(at, key, kPointerSizeLog2);
  __ addu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 t0,
                                 t1,
                                 &transition_double_elements);
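  // StoreNumberToDoubleElements accepts either a smi or a heap number: it
  // converts the value to a raw double and writes it into the backing
  // store, branching to transition_double_elements for anything else.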
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, t0, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double: transition FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
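
// For illustration (a JS-level view of the transitions handled above):
//   var a = [1, 2, 3];  // starts with FAST_SMI_ELEMENTS
//   a[0] = 1.5;         // transition_smi_elements: -> FAST_DOUBLE_ELEMENTS
//   a[1] = {};          // transition_double_elements: -> FAST_ELEMENTS
// Storing a smi never forces a transition, since a smi is a valid element
// in all three fast elements kinds.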


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = a0;
  Register key = a1;
  Register receiver = a2;
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
                         1 << Map::kIsObserved));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // The key and the array length (in t0) are unchanged since the bounds
  // check above. Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}
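
// For illustration: the two helper instantiations above correspond to
//   a[i] = v;         // i < length: in-bounds store, kDontIncrementLength
//   a[a.length] = v;  // one-past-the-end store that grows the array,
//                     // kIncrementLength (the &extra path)
// Anything else (non-smi keys, out-of-range indices, access-checked or
// observed objects) falls through to &slow and the runtime.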


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(a1, &slow);

  // Check that the key is an array index, that is, a non-negative smi.
  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
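  // (kSmiTagMask catches heap objects and kSmiSignMask catches negative
  // smis, so any key that falls through is usable as an array index.)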

  // Get the map of the receiver.
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));

  // Check that it has an indexed interceptor and that access checks are
  // not enabled for this object.
  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
  // Everything is fine, call runtime.
  __ Push(a1, a0);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(ExternalReference(
       IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a2, a1, a0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : receiver
  //  -- a2     : name
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, name and value for runtime call.
  __ Push(a1, a2, a0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  // We can't use MultiPush as the order of the registers is important.
  __ Push(a2, a1, a0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());

  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                  ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // The receiver is in a1 and the name in a2; probe the stub cache for a
  // matching handler.
  Code::Flags flags = Code::ComputeFlags(
      Code::HANDLER, MONOMORPHIC, extra_ic_state,
      Code::NORMAL, Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);
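  // On a hit, GenerateProbe jumps straight to the cached handler and does
  // not return here; on a miss it falls through to the miss path below.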

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  __ Push(a1, a2, a0);
  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
                                            masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);

  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  __ Push(a1, a2, a0);

  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
  __ li(a0, Operand(Smi::FromInt(strict_mode)));
  __ Push(a1, a0);
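  // The stack now holds the five arguments Runtime::kSetProperty expects:
  // receiver, name, value, property attributes (NONE) and the strict mode
  // flag, matching the argument count in the tail call below.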

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi zero_reg, rx, #yyy
  // marker, nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi zero_reg, rx, #yyy
  // marker, nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // Recover the delta to the start of the map check instruction; the
  // branch condition used at the patched jump is changed below.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
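  // The marker instruction is an andi with zero_reg as its destination: its
  // rs field encodes delta / kImm16Mask and its 16-bit immediate encodes
  // delta % kImm16Mask, so deltas wider than 16 bits survive the encoding.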
  // If the delta is 0, the instruction is andi zero_reg, zero_reg, #0,
  // which also signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
           address, andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  ASSERT(Assembler::IsBranch(branch_instr));
  if (Assembler::IsBeq(branch_instr)) {
    patcher.ChangeBranchCondition(ne);
  } else {
    ASSERT(Assembler::IsBne(branch_instr));
    patcher.ChangeBranchCondition(eq);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS