// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/crankshaft/mips/lithium-codegen-mips.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"


namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


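// Generated code below is emitted through the MacroAssembler; this shorthand
// keeps the emission sites reading like assembly listings.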
#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
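  // Doubles are stored in bit-vector (allocation) order, each at
  // sp + count * kDoubleSize; RestoreCallerDoubles walks the vector in the
  // same order so the offsets match up.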
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
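      // Zap the reserved slots with a recognizable value so stale or
      // uninitialized slots stand out in the debugger. a0/a1 are pushed on
      // top of the area, so the store offset of 2 * kPointerSize compensates
      // for the two saved words: exactly 'slots' words above the saved pair
      // get written.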
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both v0 and cp. It replaces the context passed
    // to us. It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
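  // Every call is a potential lazy-deopt point; pad with nops if needed so
  // that a patch of Deoptimizer::patch_size() bytes after the previous call
  // cannot overlap the code emitted for this instruction.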
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = t9;

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ li(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ Call(&needs_frame);
      } else {
        __ Call(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ Addu(entry_offset, entry_offset,
            Operand(ExternalReference::ForDeoptEntry(base)));
    __ Jump(entry_offset);
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
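  // On 32-bit targets a Smi is the untagged value shifted left by one with a
  // zero tag bit, so the cast below yields value << 1 (e.g. 5 becomes 10).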
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
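  // With no frame built, parameters sit directly above the stack pointer:
  // for kPointerSize == 4, index -1 maps to offset 0, index -2 to offset 4,
  // and so on up the stack.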
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // No eager frame has been built; address the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // No eager frame has been built; address the parameter relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
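    // Stress mode: decrement a per-isolate countdown and, each time it
    // reaches zero, reset it to FLAG_deopt_every_n_times and force a
    // deoptimization so that deopt paths are exercised regularly.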
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
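  // |divisor| is a power of two, so the remainder's magnitude is
  // dividend & (|divisor| - 1). The branch-free form below also survives
  // divisor == kMinInt, where Abs() would overflow: e.g. divisor -8 yields
  // -(-8 + 1) == 7.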
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

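  // result = dividend - trunc(dividend / |divisor|) * |divisor|; as in JS,
  // the sign of a non-zero result follows the dividend. E.g. -7 mod 4:
  // trunc(-7 / 4) == -1, so result = -7 - (-1 * 4) = -3.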
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Mod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0; we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1; div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Subu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
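  // An arithmetic shift alone would round toward minus infinity; adding
  // 2^shift - 1 to negative dividends first makes the shift round toward
  // zero instead. E.g. -7 / 4 with shift == 2: sra by 31 gives -1, srl by
  // (32 - 2) gives 3, -7 + 3 == -4, and -4 >> 2 == -1 == trunc(-7 / 4).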
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
  } else {
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  }
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.

  // dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Subu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  // Dividing by -1 is basically negation, unless we overflow.
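  // A value and its negation have opposite sign bits unless the value is 0
  // or kMinInt (which negates to itself), so a non-negative XOR below flags
  // the overflowing kMinInt / -1 case.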
  __ Xor(scratch, scratch, result);
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
                   Operand(zero_reg));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor));
  __ Branch(&done);
  __ bind(&no_overflow);
  __ sra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = scratch0();
  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
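  // Flooring differs from truncation only when the remainder is non-zero and
  // dividend and divisor have opposite signs, i.e. remainder ^ divisor < 0.
  // E.g. -7 / 2: Div yields quotient -3 and remainder -1; -1 ^ 2 is negative,
  // so the quotient is adjusted to -4 == floor(-3.5).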
  Label done;
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
    1371       // The case of a zero constant is handled separately below.
    1372       // If the constant is negative and left is zero, the result should be -0.
   1373       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
   1374     }
   1375 
   1376     switch (constant) {
   1377       case -1:
   1378         if (overflow) {
   1379           Label no_overflow;
   1380           __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
   1381           DeoptimizeIf(al, instr);
   1382           __ bind(&no_overflow);
   1383         } else {
   1384           __ Subu(result, zero_reg, left);
   1385         }
   1386         break;
   1387       case 0:
   1388         if (bailout_on_minus_zero) {
    1389           // If left is strictly negative and the constant is zero, the
   1390           // result is -0. Deoptimize if required, otherwise return 0.
   1391           DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
   1392                        Operand(zero_reg));
   1393         }
   1394         __ mov(result, zero_reg);
   1395         break;
   1396       case 1:
   1397         // Nothing to do.
   1398         __ Move(result, left);
   1399         break;
   1400       default:
   1401         // Multiplying by powers of two and powers of two plus or minus
   1402         // one can be done faster with shifted operands.
   1403         // For other constants we emit standard code.
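                 // e.g. x * 8 == x << 3, x * 9 == (x << 3) + x, and
                 // x * 7 == (x << 3) - x, while x * 10 falls through to li + Mul.
                 // The branch-free absolute value below uses mask == 0 for
                 // non-negative constants and -1 for negative ones, so
                 // (constant + mask) ^ mask negates exactly the negative case,
                 // e.g. -5: (-5 - 1) ^ -1 == -6 ^ -1 == 5.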
   1404         int32_t mask = constant >> 31;
   1405         uint32_t constant_abs = (constant + mask) ^ mask;
   1406 
   1407         if (base::bits::IsPowerOfTwo32(constant_abs)) {
   1408           int32_t shift = WhichPowerOf2(constant_abs);
   1409           __ sll(result, left, shift);
   1410           // Correct the sign of the result if the constant is negative.
    1411           if (constant < 0) __ Subu(result, zero_reg, result);
   1412         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
   1413           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1414           __ sll(scratch, left, shift);
   1415           __ Addu(result, scratch, left);
   1416           // Correct the sign of the result if the constant is negative.
    1417           if (constant < 0) __ Subu(result, zero_reg, result);
   1418         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
   1419           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1420           __ sll(scratch, left, shift);
   1421           __ Subu(result, scratch, left);
   1422           // Correct the sign of the result if the constant is negative.
    1423           if (constant < 0) __ Subu(result, zero_reg, result);
   1424         } else {
   1425           // Generate standard code.
   1426           __ li(at, constant);
   1427           __ Mul(result, left, at);
   1428         }
   1429     }
   1430 
   1431   } else {
   1432     DCHECK(right_op->IsRegister());
   1433     Register right = ToRegister(right_op);
   1434 
   1435     if (overflow) {
   1436       // hi:lo = left * right.
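               // The 32-bit product fits iff hi equals the sign extension of lo
               // (lo >> 31, arithmetic); otherwise deoptimize on overflow. For smi
               // inputs one operand is untagged first, so the product of a tagged
               // and an untagged value is itself a correctly tagged smi.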
   1437       if (instr->hydrogen()->representation().IsSmi()) {
   1438         __ SmiUntag(result, left);
   1439         __ Mul(scratch, result, result, right);
   1440       } else {
   1441         __ Mul(scratch, result, left, right);
   1442       }
   1443       __ sra(at, result, 31);
   1444       DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
   1445     } else {
   1446       if (instr->hydrogen()->representation().IsSmi()) {
   1447         __ SmiUntag(result, left);
   1448         __ Mul(result, result, right);
   1449       } else {
   1450         __ Mul(result, left, right);
   1451       }
   1452     }
   1453 
   1454     if (bailout_on_minus_zero) {
   1455       Label done;
   1456       __ Xor(at, left, right);
   1457       __ Branch(&done, ge, at, Operand(zero_reg));
   1458       // Bail out if the result is minus zero.
   1459       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
   1460                    Operand(zero_reg));
   1461       __ bind(&done);
   1462     }
   1463   }
   1464 }
   1465 
   1466 
   1467 void LCodeGen::DoBitI(LBitI* instr) {
   1468   LOperand* left_op = instr->left();
   1469   LOperand* right_op = instr->right();
   1470   DCHECK(left_op->IsRegister());
   1471   Register left = ToRegister(left_op);
   1472   Register result = ToRegister(instr->result());
   1473   Operand right(no_reg);
   1474 
   1475   if (right_op->IsStackSlot()) {
   1476     right = Operand(EmitLoadRegister(right_op, at));
   1477   } else {
   1478     DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
   1479     right = ToOperand(right_op);
   1480   }
   1481 
   1482   switch (instr->op()) {
   1483     case Token::BIT_AND:
   1484       __ And(result, left, right);
   1485       break;
   1486     case Token::BIT_OR:
   1487       __ Or(result, left, right);
   1488       break;
   1489     case Token::BIT_XOR:
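               // x ^ ~0 == ~x; MIPS has no logical-not instruction, so emit
               // nor(result, zero_reg, left), i.e. ~(0 | left).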
   1490       if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
   1491         __ Nor(result, zero_reg, left);
   1492       } else {
   1493         __ Xor(result, left, right);
   1494       }
   1495       break;
   1496     default:
   1497       UNREACHABLE();
   1498       break;
   1499   }
   1500 }
   1501 
   1502 
   1503 void LCodeGen::DoShiftI(LShiftI* instr) {
    1504   // Both 'left' and 'right' are "used at start" (see LChunkBuilder::DoShift),
    1505   // so result may alias either of them.
   1506   LOperand* right_op = instr->right();
   1507   Register left = ToRegister(instr->left());
   1508   Register result = ToRegister(instr->result());
   1509   Register scratch = scratch0();
   1510 
    1511     // No need to mask the right operand on MIPS; masking is built into
    1512     // the variable shift instructions.
   1513     // shift instructions.
   1514     switch (instr->op()) {
   1515       case Token::ROR:
   1516         __ Ror(result, left, Operand(ToRegister(right_op)));
   1517         break;
   1518       case Token::SAR:
   1519         __ srav(result, left, ToRegister(right_op));
   1520         break;
   1521       case Token::SHR:
   1522         __ srlv(result, left, ToRegister(right_op));
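                 // srlv zero-extends, so a result with the sign bit set cannot be
                 // represented as a signed 32-bit integer; deoptimize in that case.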
   1523         if (instr->can_deopt()) {
   1524           DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
   1525                        Operand(zero_reg));
   1526         }
   1527         break;
   1528       case Token::SHL:
   1529         __ sllv(result, left, ToRegister(right_op));
   1530         break;
   1531       default:
   1532         UNREACHABLE();
   1533         break;
   1534     }
   1535   } else {
    1536     // Mask the constant shift amount to its low five bits, as the hardware
   1537     int value = ToInteger32(LConstantOperand::cast(right_op));
   1538     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
   1539     switch (instr->op()) {
   1540       case Token::ROR:
   1541         if (shift_count != 0) {
   1542           __ Ror(result, left, Operand(shift_count));
   1543         } else {
   1544           __ Move(result, left);
   1545         }
   1546         break;
   1547       case Token::SAR:
   1548         if (shift_count != 0) {
   1549           __ sra(result, left, shift_count);
   1550         } else {
   1551           __ Move(result, left);
   1552         }
   1553         break;
   1554       case Token::SHR:
   1555         if (shift_count != 0) {
   1556           __ srl(result, left, shift_count);
   1557         } else {
   1558           if (instr->can_deopt()) {
   1559             __ And(at, left, Operand(0x80000000));
   1560             DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
   1561                          Operand(zero_reg));
   1562           }
   1563           __ Move(result, left);
   1564         }
   1565         break;
   1566       case Token::SHL:
   1567         if (shift_count != 0) {
   1568           if (instr->hydrogen_value()->representation().IsSmi() &&
   1569               instr->can_deopt()) {
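                     // The last bit of the shift is done by SmiTagCheckOverflow
                     // (a smi tag is a left shift by one), so it can detect
                     // overflow; shift by (shift_count - 1) first when needed.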
   1570             if (shift_count != 1) {
   1571               __ sll(result, left, shift_count - 1);
   1572               __ SmiTagCheckOverflow(result, result, scratch);
   1573             } else {
   1574               __ SmiTagCheckOverflow(result, left, scratch);
   1575             }
   1576             DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
   1577                          Operand(zero_reg));
   1578           } else {
   1579             __ sll(result, left, shift_count);
   1580           }
   1581         } else {
   1582           __ Move(result, left);
   1583         }
   1584         break;
   1585       default:
   1586         UNREACHABLE();
   1587         break;
   1588     }
   1589   }
   1590 }
   1591 
   1592 
   1593 void LCodeGen::DoSubI(LSubI* instr) {
   1594   LOperand* left = instr->left();
   1595   LOperand* right = instr->right();
   1596   LOperand* result = instr->result();
   1597   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1598 
   1599   if (!can_overflow) {
   1600     if (right->IsStackSlot()) {
   1601       Register right_reg = EmitLoadRegister(right, at);
   1602       __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
   1603     } else {
   1604       DCHECK(right->IsRegister() || right->IsConstantOperand());
   1605       __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
   1606     }
   1607   } else {  // can_overflow.
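             // SubBranchNoOvf computes the difference and branches to
             // no_overflow_label unless the subtraction overflowed; only the
             // overflow path reaches the unconditional deoptimization below.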
   1608     Register scratch = scratch0();
   1609     Label no_overflow_label;
   1610     if (right->IsStackSlot()) {
   1611       Register right_reg = EmitLoadRegister(right, scratch);
   1612       __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
   1613                         Operand(right_reg), &no_overflow_label);
   1614     } else {
   1615       DCHECK(right->IsRegister() || right->IsConstantOperand());
   1616       __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
   1617                         &no_overflow_label, scratch);
   1618     }
   1619     DeoptimizeIf(al, instr);
   1620     __ bind(&no_overflow_label);
   1621   }
   1622 }
   1623 
   1624 
   1625 void LCodeGen::DoConstantI(LConstantI* instr) {
   1626   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1627 }
   1628 
   1629 
   1630 void LCodeGen::DoConstantS(LConstantS* instr) {
   1631   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1632 }
   1633 
   1634 
   1635 void LCodeGen::DoConstantD(LConstantD* instr) {
   1636   DCHECK(instr->result()->IsDoubleRegister());
   1637   DoubleRegister result = ToDoubleRegister(instr->result());
   1638   double v = instr->value();
   1639   __ Move(result, v);
   1640 }
   1641 
   1642 
   1643 void LCodeGen::DoConstantE(LConstantE* instr) {
   1644   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1645 }
   1646 
   1647 
   1648 void LCodeGen::DoConstantT(LConstantT* instr) {
   1649   Handle<Object> object = instr->value(isolate());
   1650   AllowDeferredHandleDereference smi_check;
   1651   __ li(ToRegister(instr->result()), object);
   1652 }
   1653 
   1654 
   1655 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
   1656   Register result = ToRegister(instr->result());
   1657   Register map = ToRegister(instr->value());
   1658   __ EnumLength(result, map);
   1659 }
   1660 
   1661 
   1662 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   1663                                            LOperand* index,
   1664                                            String::Encoding encoding) {
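           // Computes the address of the character at |index|: the byte offset is
           // index for one-byte strings and index * kUC16Size for two-byte
           // strings, relative to the sequential string header.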
   1665   if (index->IsConstantOperand()) {
   1666     int offset = ToInteger32(LConstantOperand::cast(index));
   1667     if (encoding == String::TWO_BYTE_ENCODING) {
   1668       offset *= kUC16Size;
   1669     }
   1670     STATIC_ASSERT(kCharSize == 1);
   1671     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   1672   }
   1673   Register scratch = scratch0();
   1674   DCHECK(!scratch.is(string));
   1675   DCHECK(!scratch.is(ToRegister(index)));
   1676   if (encoding == String::ONE_BYTE_ENCODING) {
   1677     __ Addu(scratch, string, ToRegister(index));
   1678   } else {
   1679     STATIC_ASSERT(kUC16Size == 2);
   1680     __ sll(scratch, ToRegister(index), 1);
   1681     __ Addu(scratch, string, scratch);
   1682   }
   1683   return FieldMemOperand(scratch, SeqString::kHeaderSize);
   1684 }
   1685 
   1686 
   1687 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1688   String::Encoding encoding = instr->hydrogen()->encoding();
   1689   Register string = ToRegister(instr->string());
   1690   Register result = ToRegister(instr->result());
   1691 
   1692   if (FLAG_debug_code) {
   1693     Register scratch = scratch0();
   1694     __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   1695     __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1696 
   1697     __ And(scratch, scratch,
   1698            Operand(kStringRepresentationMask | kStringEncodingMask));
   1699     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1700     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1701     __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
   1702                                 ? one_byte_seq_type : two_byte_seq_type));
   1703     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
   1704   }
   1705 
   1706   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1707   if (encoding == String::ONE_BYTE_ENCODING) {
   1708     __ lbu(result, operand);
   1709   } else {
   1710     __ lhu(result, operand);
   1711   }
   1712 }
   1713 
   1714 
   1715 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   1716   String::Encoding encoding = instr->hydrogen()->encoding();
   1717   Register string = ToRegister(instr->string());
   1718   Register value = ToRegister(instr->value());
   1719 
   1720   if (FLAG_debug_code) {
   1721     Register scratch = scratch0();
   1722     Register index = ToRegister(instr->index());
   1723     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1724     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1725     int encoding_mask =
   1726         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   1727         ? one_byte_seq_type : two_byte_seq_type;
   1728     __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
   1729   }
   1730 
   1731   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1732   if (encoding == String::ONE_BYTE_ENCODING) {
   1733     __ sb(value, operand);
   1734   } else {
   1735     __ sh(value, operand);
   1736   }
   1737 }
   1738 
   1739 
   1740 void LCodeGen::DoAddI(LAddI* instr) {
   1741   LOperand* left = instr->left();
   1742   LOperand* right = instr->right();
   1743   LOperand* result = instr->result();
   1744   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1745 
   1746   if (!can_overflow) {
   1747     if (right->IsStackSlot()) {
   1748       Register right_reg = EmitLoadRegister(right, at);
   1749       __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
   1750     } else {
   1751       DCHECK(right->IsRegister() || right->IsConstantOperand());
   1752       __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
   1753     }
   1754   } else {  // can_overflow.
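             // Mirrors DoSubI: AddBranchNoOvf skips the unconditional
             // deoptimization below unless the addition overflowed.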
   1755     Register scratch = scratch1();
   1756     Label no_overflow_label;
   1757     if (right->IsStackSlot()) {
   1758       Register right_reg = EmitLoadRegister(right, scratch);
   1759       __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
   1760                         Operand(right_reg), &no_overflow_label);
   1761     } else {
   1762       DCHECK(right->IsRegister() || right->IsConstantOperand());
   1763       __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
   1764                         &no_overflow_label, scratch);
   1765     }
   1766     DeoptimizeIf(al, instr);
   1767     __ bind(&no_overflow_label);
   1768   }
   1769 }
   1770 
   1771 
   1772 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   1773   LOperand* left = instr->left();
   1774   LOperand* right = instr->right();
   1775   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   1776   Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
   1777   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   1778     Register left_reg = ToRegister(left);
   1779     Register right_reg = EmitLoadRegister(right, scratch0());
   1780     Register result_reg = ToRegister(instr->result());
   1781     Label return_right, done;
   1782     Register scratch = scratch1();
   1783     __ Slt(scratch, left_reg, Operand(right_reg));
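             // scratch is 1 if left < right and 0 otherwise. Movz moves when the
             // condition register is zero, Movn when it is non-zero, so each pair
             // below selects the minimum or maximum without branching.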
   1784     if (condition == ge) {
    1785       __ Movz(result_reg, left_reg, scratch);
    1786       __ Movn(result_reg, right_reg, scratch);
    1787     } else {
    1788       DCHECK(condition == le);
    1789       __ Movn(result_reg, left_reg, scratch);
    1790       __ Movz(result_reg, right_reg, scratch);
   1791     }
   1792   } else {
   1793     DCHECK(instr->hydrogen()->representation().IsDouble());
   1794     FPURegister left_reg = ToDoubleRegister(left);
   1795     FPURegister right_reg = ToDoubleRegister(right);
   1796     FPURegister result_reg = ToDoubleRegister(instr->result());
   1797     Label check_nan_left, check_zero, return_left, return_right, done;
   1798     __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
   1799     __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
   1800     __ Branch(&return_right);
   1801 
   1802     __ bind(&check_zero);
   1803     // left == right != 0.
   1804     __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
   1805     // At this point, both left and right are either 0 or -0.
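             // IEEE 754: 0 - 0 == +0, so min is computed as -((-left) - right),
             // which yields -0 whenever either input is -0; for max, a plain
             // addition gives -0 only when both inputs are -0.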
   1806     if (operation == HMathMinMax::kMathMin) {
   1807       __ neg_d(left_reg, left_reg);
   1808       __ sub_d(result_reg, left_reg, right_reg);
   1809       __ neg_d(result_reg, result_reg);
   1810     } else {
   1811       __ add_d(result_reg, left_reg, right_reg);
   1812     }
   1813     __ Branch(&done);
   1814 
   1815     __ bind(&check_nan_left);
    1816     // If left is NaN, return it; otherwise right must be the NaN.
   1817     __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
   1818     __ bind(&return_right);
   1819     if (!right_reg.is(result_reg)) {
   1820       __ mov_d(result_reg, right_reg);
   1821     }
   1822     __ Branch(&done);
   1823 
   1824     __ bind(&return_left);
   1825     if (!left_reg.is(result_reg)) {
   1826       __ mov_d(result_reg, left_reg);
   1827     }
   1828     __ bind(&done);
   1829   }
   1830 }
   1831 
   1832 
   1833 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1834   DoubleRegister left = ToDoubleRegister(instr->left());
   1835   DoubleRegister right = ToDoubleRegister(instr->right());
   1836   DoubleRegister result = ToDoubleRegister(instr->result());
   1837   switch (instr->op()) {
   1838     case Token::ADD:
   1839       __ add_d(result, left, right);
   1840       break;
   1841     case Token::SUB:
   1842       __ sub_d(result, left, right);
   1843       break;
   1844     case Token::MUL:
   1845       __ mul_d(result, left, right);
   1846       break;
   1847     case Token::DIV:
   1848       __ div_d(result, left, right);
   1849       break;
   1850     case Token::MOD: {
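               // MIPS has no FPU remainder instruction, so the modulus is computed
               // by calling out to a C helper, with the operands passed in FPU
               // parameter registers and a0-a3 preserved around the call.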
   1851       // Save a0-a3 on the stack.
   1852       RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
   1853       __ MultiPush(saved_regs);
   1854 
   1855       __ PrepareCallCFunction(0, 2, scratch0());
   1856       __ MovToFloatParameters(left, right);
   1857       __ CallCFunction(
   1858           ExternalReference::mod_two_doubles_operation(isolate()),
   1859           0, 2);
   1860       // Move the result in the double result register.
    1861       // Move the result into the double result register.
   1862 
   1863       // Restore saved register.
    1864       // Restore the saved registers.
   1865       break;
   1866     }
   1867     default:
   1868       UNREACHABLE();
   1869       break;
   1870   }
   1871 }
   1872 
   1873 
   1874 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   1875   DCHECK(ToRegister(instr->context()).is(cp));
   1876   DCHECK(ToRegister(instr->left()).is(a1));
   1877   DCHECK(ToRegister(instr->right()).is(a0));
   1878   DCHECK(ToRegister(instr->result()).is(v0));
   1879 
   1880   Handle<Code> code =
   1881       CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
   1882   CallCode(code, RelocInfo::CODE_TARGET, instr);
    1883   // Other architectures use a nop here to signal that there is no inlined
    1884   // patchable code. MIPS does not need the nop, since our marker
    1885   // instruction (andi zero_reg) will never be used in normal code.
   1886 }
   1887 
   1888 
   1889 template<class InstrType>
   1890 void LCodeGen::EmitBranch(InstrType instr,
   1891                           Condition condition,
   1892                           Register src1,
   1893                           const Operand& src2) {
   1894   int left_block = instr->TrueDestination(chunk_);
   1895   int right_block = instr->FalseDestination(chunk_);
   1896 
   1897   int next_block = GetNextEmittedBlock();
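           // Emit as few branches as possible: fall through when one destination
           // is the next emitted block, inverting the condition when the true
           // block is the fall-through target.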
   1898   if (right_block == left_block || condition == al) {
   1899     EmitGoto(left_block);
   1900   } else if (left_block == next_block) {
   1901     __ Branch(chunk_->GetAssemblyLabel(right_block),
   1902               NegateCondition(condition), src1, src2);
   1903   } else if (right_block == next_block) {
   1904     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
   1905   } else {
   1906     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
   1907     __ Branch(chunk_->GetAssemblyLabel(right_block));
   1908   }
   1909 }
   1910 
   1911 
   1912 template<class InstrType>
   1913 void LCodeGen::EmitBranchF(InstrType instr,
   1914                            Condition condition,
   1915                            FPURegister src1,
   1916                            FPURegister src2) {
   1917   int right_block = instr->FalseDestination(chunk_);
   1918   int left_block = instr->TrueDestination(chunk_);
   1919 
   1920   int next_block = GetNextEmittedBlock();
   1921   if (right_block == left_block) {
   1922     EmitGoto(left_block);
   1923   } else if (left_block == next_block) {
   1924     __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
   1925                NegateFpuCondition(condition), src1, src2);
   1926   } else if (right_block == next_block) {
   1927     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
   1928                condition, src1, src2);
   1929   } else {
   1930     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
   1931                condition, src1, src2);
   1932     __ Branch(chunk_->GetAssemblyLabel(right_block));
   1933   }
   1934 }
   1935 
   1936 
   1937 template <class InstrType>
   1938 void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
   1939                               Register src1, const Operand& src2) {
   1940   int true_block = instr->TrueDestination(chunk_);
   1941   __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
   1942 }
   1943 
   1944 
   1945 template <class InstrType>
   1946 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
   1947                                Register src1, const Operand& src2) {
   1948   int false_block = instr->FalseDestination(chunk_);
   1949   __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
   1950 }
   1951 
   1952 
   1953 template<class InstrType>
   1954 void LCodeGen::EmitFalseBranchF(InstrType instr,
   1955                                 Condition condition,
   1956                                 FPURegister src1,
   1957                                 FPURegister src2) {
   1958   int false_block = instr->FalseDestination(chunk_);
   1959   __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
   1960              condition, src1, src2);
   1961 }
   1962 
   1963 
   1964 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   1965   __ stop("LDebugBreak");
   1966 }
   1967 
   1968 
   1969 void LCodeGen::DoBranch(LBranch* instr) {
   1970   Representation r = instr->hydrogen()->value()->representation();
   1971   if (r.IsInteger32() || r.IsSmi()) {
   1972     DCHECK(!info()->IsStub());
   1973     Register reg = ToRegister(instr->value());
   1974     EmitBranch(instr, ne, reg, Operand(zero_reg));
   1975   } else if (r.IsDouble()) {
   1976     DCHECK(!info()->IsStub());
   1977     DoubleRegister reg = ToDoubleRegister(instr->value());
   1978     // Test the double value. Zero and NaN are false.
   1979     EmitBranchF(instr, ogl, reg, kDoubleRegZero);
   1980   } else {
   1981     DCHECK(r.IsTagged());
   1982     Register reg = ToRegister(instr->value());
   1983     HType type = instr->hydrogen()->value()->type();
   1984     if (type.IsBoolean()) {
   1985       DCHECK(!info()->IsStub());
   1986       __ LoadRoot(at, Heap::kTrueValueRootIndex);
   1987       EmitBranch(instr, eq, reg, Operand(at));
   1988     } else if (type.IsSmi()) {
   1989       DCHECK(!info()->IsStub());
   1990       EmitBranch(instr, ne, reg, Operand(zero_reg));
   1991     } else if (type.IsJSArray()) {
   1992       DCHECK(!info()->IsStub());
   1993       EmitBranch(instr, al, zero_reg, Operand(zero_reg));
   1994     } else if (type.IsHeapNumber()) {
   1995       DCHECK(!info()->IsStub());
   1996       DoubleRegister dbl_scratch = double_scratch0();
   1997       __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   1998       // Test the double value. Zero and NaN are false.
   1999       EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
   2000     } else if (type.IsString()) {
   2001       DCHECK(!info()->IsStub());
   2002       __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
   2003       EmitBranch(instr, ne, at, Operand(zero_reg));
   2004     } else {
   2005       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
   2006       // Avoid deopts in the case where we've never executed this path before.
   2007       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
   2008 
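               // Emit an inline check for each type the ToBoolean IC has observed,
               // branching to the true or false label as soon as one matches;
               // anything unobserved deoptimizes at the end.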
   2009       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
   2010         // undefined -> false.
   2011         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   2012         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2013       }
   2014       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
   2015         // Boolean -> its value.
   2016         __ LoadRoot(at, Heap::kTrueValueRootIndex);
   2017         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
   2018         __ LoadRoot(at, Heap::kFalseValueRootIndex);
   2019         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2020       }
   2021       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
   2022         // 'null' -> false.
   2023         __ LoadRoot(at, Heap::kNullValueRootIndex);
   2024         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2025       }
   2026 
   2027       if (expected.Contains(ToBooleanStub::SMI)) {
    2028         // Smis: 0 -> false, all others -> true.
   2029         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
   2030         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2031       } else if (expected.NeedsMap()) {
   2032         // If we need a map later and have a Smi -> deopt.
   2033         __ SmiTst(reg, at);
   2034         DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
   2035       }
   2036 
   2037       const Register map = scratch0();
   2038       if (expected.NeedsMap()) {
   2039         __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
   2040         if (expected.CanBeUndetectable()) {
   2041           // Undetectable -> false.
   2042           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
   2043           __ And(at, at, Operand(1 << Map::kIsUndetectable));
   2044           __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
   2045         }
   2046       }
   2047 
   2048       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
   2049         // spec object -> true.
   2050         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2051         __ Branch(instr->TrueLabel(chunk_),
   2052                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
   2053       }
   2054 
   2055       if (expected.Contains(ToBooleanStub::STRING)) {
   2056         // String value -> false iff empty.
   2057         Label not_string;
   2058         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
    2059         __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
   2060         __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
   2061         __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
   2062         __ Branch(instr->FalseLabel(chunk_));
   2063         __ bind(&not_string);
   2064       }
   2065 
   2066       if (expected.Contains(ToBooleanStub::SYMBOL)) {
   2067         // Symbol value -> true.
   2068         const Register scratch = scratch1();
   2069         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2070         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
   2071       }
   2072 
   2073       if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
   2074         // SIMD value -> true.
   2075         const Register scratch = scratch1();
   2076         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2077         __ Branch(instr->TrueLabel(chunk_), eq, scratch,
   2078                   Operand(SIMD128_VALUE_TYPE));
   2079       }
   2080 
   2081       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
   2082         // heap number -> false iff +0, -0, or NaN.
   2083         DoubleRegister dbl_scratch = double_scratch0();
   2084         Label not_heap_number;
   2085         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   2086         __ Branch(&not_heap_number, ne, map, Operand(at));
   2087         __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2088         __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2089                    ne, dbl_scratch, kDoubleRegZero);
   2090         // Falls through if dbl_scratch == 0.
   2091         __ Branch(instr->FalseLabel(chunk_));
   2092         __ bind(&not_heap_number);
   2093       }
   2094 
   2095       if (!expected.IsGeneric()) {
   2096         // We've seen something for the first time -> deopt.
   2097         // This can only happen if we are not generic already.
   2098         DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
   2099                      Operand(zero_reg));
   2100       }
   2101     }
   2102   }
   2103 }
   2104 
   2105 
   2106 void LCodeGen::EmitGoto(int block) {
   2107   if (!IsNextEmittedBlock(block)) {
   2108     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2109   }
   2110 }
   2111 
   2112 
   2113 void LCodeGen::DoGoto(LGoto* instr) {
   2114   EmitGoto(instr->block_id());
   2115 }
   2116 
   2117 
   2118 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2119   Condition cond = kNoCondition;
   2120   switch (op) {
   2121     case Token::EQ:
   2122     case Token::EQ_STRICT:
   2123       cond = eq;
   2124       break;
   2125     case Token::NE:
   2126     case Token::NE_STRICT:
   2127       cond = ne;
   2128       break;
   2129     case Token::LT:
   2130       cond = is_unsigned ? lo : lt;
   2131       break;
   2132     case Token::GT:
   2133       cond = is_unsigned ? hi : gt;
   2134       break;
   2135     case Token::LTE:
   2136       cond = is_unsigned ? ls : le;
   2137       break;
   2138     case Token::GTE:
   2139       cond = is_unsigned ? hs : ge;
   2140       break;
   2141     case Token::IN:
   2142     case Token::INSTANCEOF:
   2143     default:
   2144       UNREACHABLE();
   2145   }
   2146   return cond;
   2147 }
   2148 
   2149 
   2150 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2151   LOperand* left = instr->left();
   2152   LOperand* right = instr->right();
   2153   bool is_unsigned =
   2154       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2155       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2156   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2157 
   2158   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2159     // We can statically evaluate the comparison.
   2160     double left_val = ToDouble(LConstantOperand::cast(left));
   2161     double right_val = ToDouble(LConstantOperand::cast(right));
   2162     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
   2163         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
   2164     EmitGoto(next_block);
   2165   } else {
   2166     if (instr->is_double()) {
   2167       // Compare left and right as doubles and load the
   2168       // resulting flags into the normal status register.
   2169       FPURegister left_reg = ToDoubleRegister(left);
   2170       FPURegister right_reg = ToDoubleRegister(right);
   2171 
   2172       // If a NaN is involved, i.e. the result is unordered,
    2173       // jump to the false block label.
   2174       __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
   2175                  left_reg, right_reg);
   2176 
   2177       EmitBranchF(instr, cond, left_reg, right_reg);
   2178     } else {
   2179       Register cmp_left;
   2180       Operand cmp_right = Operand(0);
   2181 
   2182       if (right->IsConstantOperand()) {
   2183         int32_t value = ToInteger32(LConstantOperand::cast(right));
   2184         if (instr->hydrogen_value()->representation().IsSmi()) {
   2185           cmp_left = ToRegister(left);
   2186           cmp_right = Operand(Smi::FromInt(value));
   2187         } else {
   2188           cmp_left = ToRegister(left);
   2189           cmp_right = Operand(value);
   2190         }
   2191       } else if (left->IsConstantOperand()) {
   2192         int32_t value = ToInteger32(LConstantOperand::cast(left));
   2193         if (instr->hydrogen_value()->representation().IsSmi()) {
    2194           cmp_left = ToRegister(right);
    2195           cmp_right = Operand(Smi::FromInt(value));
   2196         } else {
   2197           cmp_left = ToRegister(right);
   2198           cmp_right = Operand(value);
   2199         }
   2200         // We commuted the operands, so commute the condition.
   2201         cond = CommuteCondition(cond);
   2202       } else {
   2203         cmp_left = ToRegister(left);
   2204         cmp_right = Operand(ToRegister(right));
   2205       }
   2206 
   2207       EmitBranch(instr, cond, cmp_left, cmp_right);
   2208     }
   2209   }
   2210 }
   2211 
   2212 
   2213 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2214   Register left = ToRegister(instr->left());
   2215   Register right = ToRegister(instr->right());
   2216 
   2217   EmitBranch(instr, eq, left, Operand(right));
   2218 }
   2219 
   2220 
   2221 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2222   if (instr->hydrogen()->representation().IsTagged()) {
   2223     Register input_reg = ToRegister(instr->object());
   2224     __ li(at, Operand(factory()->the_hole_value()));
   2225     EmitBranch(instr, eq, input_reg, Operand(at));
   2226     return;
   2227   }
   2228 
   2229   DoubleRegister input_reg = ToDoubleRegister(instr->object());
   2230   EmitFalseBranchF(instr, eq, input_reg, input_reg);
   2231 
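           // Only NaNs remain: x == x failed above. The hole is encoded as a NaN
           // with a distinguished upper word, so comparing the high 32 bits
           // against kHoleNanUpper32 identifies it.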
   2232   Register scratch = scratch0();
   2233   __ FmoveHigh(scratch, input_reg);
   2234   EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
   2235 }
   2236 
   2237 
   2238 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   2239   Representation rep = instr->hydrogen()->value()->representation();
   2240   DCHECK(!rep.IsInteger32());
   2241   Register scratch = ToRegister(instr->temp());
   2242 
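           // +0 and -0 compare equal, so -0 is identified by its bit pattern:
           // high word 0x80000000 (sign bit only) and low word zero.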
   2243   if (rep.IsDouble()) {
   2244     DoubleRegister value = ToDoubleRegister(instr->value());
   2245     EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
   2246     __ FmoveHigh(scratch, value);
   2247     __ li(at, 0x80000000);
   2248   } else {
   2249     Register value = ToRegister(instr->value());
   2250     __ CheckMap(value,
   2251                 scratch,
   2252                 Heap::kHeapNumberMapRootIndex,
   2253                 instr->FalseLabel(chunk()),
   2254                 DO_SMI_CHECK);
   2255     __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
   2256     EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
   2257     __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
   2258     __ mov(at, zero_reg);
   2259   }
   2260   EmitBranch(instr, eq, scratch, Operand(at));
   2261 }
   2262 
   2263 
   2264 Condition LCodeGen::EmitIsString(Register input,
   2265                                  Register temp1,
   2266                                  Label* is_not_string,
   2267                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2268   if (check_needed == INLINE_SMI_CHECK) {
   2269     __ JumpIfSmi(input, is_not_string);
   2270   }
   2271   __ GetObjectType(input, temp1, temp1);
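           // temp1 now holds the instance type. All string types lie below
           // FIRST_NONSTRING_TYPE, so the caller branches on lt against it.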
   2272 
   2273   return lt;
   2274 }
   2275 
   2276 
   2277 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2278   Register reg = ToRegister(instr->value());
   2279   Register temp1 = ToRegister(instr->temp());
   2280 
   2281   SmiCheck check_needed =
   2282       instr->hydrogen()->value()->type().IsHeapObject()
   2283           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2284   Condition true_cond =
   2285       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
   2286 
   2287   EmitBranch(instr, true_cond, temp1,
   2288              Operand(FIRST_NONSTRING_TYPE));
   2289 }
   2290 
   2291 
   2292 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2293   Register input_reg = EmitLoadRegister(instr->value(), at);
   2294   __ And(at, input_reg, kSmiTagMask);
   2295   EmitBranch(instr, eq, at, Operand(zero_reg));
   2296 }
   2297 
   2298 
   2299 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2300   Register input = ToRegister(instr->value());
   2301   Register temp = ToRegister(instr->temp());
   2302 
   2303   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2304     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2305   }
   2306   __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2307   __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2308   __ And(at, temp, Operand(1 << Map::kIsUndetectable));
   2309   EmitBranch(instr, ne, at, Operand(zero_reg));
   2310 }
   2311 
   2312 
   2313 static Condition ComputeCompareCondition(Token::Value op) {
   2314   switch (op) {
   2315     case Token::EQ_STRICT:
   2316     case Token::EQ:
   2317       return eq;
   2318     case Token::LT:
   2319       return lt;
   2320     case Token::GT:
   2321       return gt;
   2322     case Token::LTE:
   2323       return le;
   2324     case Token::GTE:
   2325       return ge;
   2326     default:
   2327       UNREACHABLE();
   2328       return kNoCondition;
   2329   }
   2330 }
   2331 
   2332 
   2333 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2334   DCHECK(ToRegister(instr->context()).is(cp));
   2335   DCHECK(ToRegister(instr->left()).is(a1));
   2336   DCHECK(ToRegister(instr->right()).is(a0));
   2337 
   2338   Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
   2339   CallCode(code, RelocInfo::CODE_TARGET, instr);
   2340 
   2341   EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
   2342              Operand(zero_reg));
   2343 }
   2344 
   2345 
   2346 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2347   InstanceType from = instr->from();
   2348   InstanceType to = instr->to();
   2349   if (from == FIRST_TYPE) return to;
   2350   DCHECK(from == to || to == LAST_TYPE);
   2351   return from;
   2352 }
   2353 
   2354 
   2355 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2356   InstanceType from = instr->from();
   2357   InstanceType to = instr->to();
   2358   if (from == to) return eq;
   2359   if (to == LAST_TYPE) return hs;
   2360   if (from == FIRST_TYPE) return ls;
   2361   UNREACHABLE();
   2362   return eq;
   2363 }
   2364 
   2365 
   2366 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2367   Register scratch = scratch0();
   2368   Register input = ToRegister(instr->value());
   2369 
   2370   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2371     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2372   }
   2373 
   2374   __ GetObjectType(input, scratch, scratch);
   2375   EmitBranch(instr,
   2376              BranchCondition(instr->hydrogen()),
   2377              scratch,
   2378              Operand(TestType(instr->hydrogen())));
   2379 }
   2380 
   2381 
   2382 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
   2383   Register input = ToRegister(instr->value());
   2384   Register result = ToRegister(instr->result());
   2385 
   2386   __ AssertString(input);
   2387 
   2388   __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
   2389   __ IndexFromHash(result, result);
   2390 }
   2391 
   2392 
   2393 void LCodeGen::DoHasCachedArrayIndexAndBranch(
   2394     LHasCachedArrayIndexAndBranch* instr) {
   2395   Register input = ToRegister(instr->value());
   2396   Register scratch = scratch0();
   2397 
   2398   __ lw(scratch,
   2399          FieldMemOperand(input, String::kHashFieldOffset));
   2400   __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
   2401   EmitBranch(instr, eq, at, Operand(zero_reg));
   2402 }
   2403 
   2404 
    2405 // Branches to a label or falls through with the class name in the temp
    2406 // register for the caller to compare; trashes the temps, not the input.
   2407 void LCodeGen::EmitClassOfTest(Label* is_true,
   2408                                Label* is_false,
    2409                                Handle<String> class_name,
   2410                                Register input,
   2411                                Register temp,
   2412                                Register temp2) {
   2413   DCHECK(!input.is(temp));
   2414   DCHECK(!input.is(temp2));
   2415   DCHECK(!temp.is(temp2));
   2416 
   2417   __ JumpIfSmi(input, is_false);
   2418   __ GetObjectType(input, temp, temp2);
   2419   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2420     __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
   2421   } else {
   2422     __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
   2423   }
   2424 
   2425   // Check if the constructor in the map is a function.
   2426   Register instance_type = scratch1();
   2427   DCHECK(!instance_type.is(temp));
   2428   __ GetMapConstructor(temp, temp, temp2, instance_type);
   2429 
   2430   // Objects with a non-function constructor have class 'Object'.
   2431   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
   2432     __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
   2433   } else {
   2434     __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
   2435   }
   2436 
   2437   // temp now contains the constructor function. Grab the
   2438   // instance class name from there.
   2439   __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2440   __ lw(temp, FieldMemOperand(temp,
   2441                                SharedFunctionInfo::kInstanceClassNameOffset));
   2442   // The class name we are testing against is internalized since it's a literal.
   2443   // The name in the constructor is internalized because of the way the context
   2444   // is booted.  This routine isn't expected to work for random API-created
   2445   // classes and it doesn't have to because you can't access it with natives
   2446   // syntax.  Since both sides are internalized it is sufficient to use an
   2447   // identity comparison.
   2448 
   2449   // End with the address of this class_name instance in temp register.
    2450   // On MIPS, the caller must do the comparison with Handle<String> class_name.
   2451 }
   2452 
   2453 
   2454 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2455   Register input = ToRegister(instr->value());
   2456   Register temp = scratch0();
   2457   Register temp2 = ToRegister(instr->temp());
   2458   Handle<String> class_name = instr->hydrogen()->class_name();
   2459 
   2460   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2461                   class_name, input, temp, temp2);
   2462 
   2463   EmitBranch(instr, eq, temp, Operand(class_name));
   2464 }
   2465 
   2466 
   2467 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2468   Register reg = ToRegister(instr->value());
   2469   Register temp = ToRegister(instr->temp());
   2470 
   2471   __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   2472   EmitBranch(instr, eq, temp, Operand(instr->map()));
   2473 }
   2474 
   2475 
   2476 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   2477   DCHECK(ToRegister(instr->context()).is(cp));
   2478   DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
   2479   DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
   2480   DCHECK(ToRegister(instr->result()).is(v0));
   2481   InstanceOfStub stub(isolate());
   2482   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2483 }
   2484 
   2485 
   2486 void LCodeGen::DoHasInPrototypeChainAndBranch(
   2487     LHasInPrototypeChainAndBranch* instr) {
   2488   Register const object = ToRegister(instr->object());
   2489   Register const object_map = scratch0();
   2490   Register const object_instance_type = scratch1();
   2491   Register const object_prototype = object_map;
   2492   Register const prototype = ToRegister(instr->prototype());
   2493 
   2494   // The {object} must be a spec object.  It's sufficient to know that {object}
   2495   // is not a smi, since all other non-spec objects have {null} prototypes and
   2496   // will be ruled out below.
   2497   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
   2498     __ SmiTst(object, at);
   2499     EmitFalseBranch(instr, eq, at, Operand(zero_reg));
   2500   }
   2501 
    2502   // Loop through the {object}'s prototype chain looking for the {prototype}.
   2503   __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   2504   Label loop;
   2505   __ bind(&loop);
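           // Each iteration either deoptimizes (access-checked object or proxy),
           // exits true (the prototype was found), exits false (null terminates
           // the chain), or advances to the next prototype's map.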
   2506 
   2507   // Deoptimize if the object needs to be access checked.
   2508   __ lbu(object_instance_type,
   2509          FieldMemOperand(object_map, Map::kBitFieldOffset));
   2510   __ And(object_instance_type, object_instance_type,
   2511          Operand(1 << Map::kIsAccessCheckNeeded));
   2512   DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
   2513                Operand(zero_reg));
   2514   // Deoptimize for proxies.
   2515   __ lbu(object_instance_type,
   2516          FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   2517   DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
   2518                Operand(JS_PROXY_TYPE));
   2519 
   2520   __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
   2521   EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
   2522   __ LoadRoot(at, Heap::kNullValueRootIndex);
   2523   EmitFalseBranch(instr, eq, object_prototype, Operand(at));
   2524   __ Branch(USE_DELAY_SLOT, &loop);
   2525   __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   2526 }
   2527 
   2528 
   2529 void LCodeGen::DoCmpT(LCmpT* instr) {
   2530   DCHECK(ToRegister(instr->context()).is(cp));
   2531   Token::Value op = instr->op();
   2532 
   2533   Handle<Code> ic =
   2534       CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
   2535   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2536   // On MIPS there is no need for a "no inlined smi code" marker (nop).
   2537 
   2538   Condition condition = ComputeCompareCondition(op);
   2539   // A minor optimization that relies on LoadRoot always emitting one
   2540   // instruction.
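           // The true-value LoadRoot occupies the branch delay slot and so
           // executes unconditionally; when the branch is not taken, the
           // false-value load below simply overwrites it. The DCHECK guards the
           // one-instruction assumption.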
   2541   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
   2542   Label done, check;
   2543   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
   2544   __ bind(&check);
   2545   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   2546   DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
   2547   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   2548   __ bind(&done);
   2549 }
   2550 
   2551 
   2552 void LCodeGen::DoReturn(LReturn* instr) {
   2553   if (FLAG_trace && info()->IsOptimizing()) {
   2554     // Push the return value on the stack as the parameter.
   2555     // Runtime::TraceExit returns its parameter in v0. We're leaving the code
    2556     // managed by the register allocator and tearing down the frame, so it's
   2557     // safe to write to the context register.
   2558     __ push(v0);
   2559     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2560     __ CallRuntime(Runtime::kTraceExit);
   2561   }
   2562   if (info()->saves_caller_doubles()) {
   2563     RestoreCallerDoubles();
   2564   }
   2565   if (NeedsEagerFrame()) {
   2566     __ mov(sp, fp);
   2567     __ Pop(ra, fp);
   2568   }
   2569   if (instr->has_constant_parameter_count()) {
   2570     int parameter_count = ToInteger32(instr->constant_parameter_count());
   2571     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
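             // The +1 drops the receiver in addition to the declared parameters.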
   2572     if (sp_delta != 0) {
   2573       __ Addu(sp, sp, Operand(sp_delta));
   2574     }
   2575   } else {
   2576     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   2577     Register reg = ToRegister(instr->parameter_count());
    2578     // The argument count parameter is a smi.
   2579     __ SmiUntag(reg);
   2580     __ sll(at, reg, kPointerSizeLog2);
   2581     __ Addu(sp, sp, at);
   2582   }
   2583 
   2584   __ Jump(ra);
   2585 }
   2586 
   2587 
   2588 template <class T>
   2589 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   2590   Register vector_register = ToRegister(instr->temp_vector());
   2591   Register slot_register = LoadWithVectorDescriptor::SlotRegister();
   2592   DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
   2593   DCHECK(slot_register.is(a0));
   2594 
   2595   AllowDeferredHandleDereference vector_structure_check;
   2596   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   2597   __ li(vector_register, vector);
   2598   // No need to allocate this register.
   2599   FeedbackVectorSlot slot = instr->hydrogen()->slot();
   2600   int index = vector->GetIndex(slot);
   2601   __ li(slot_register, Operand(Smi::FromInt(index)));
   2602 }
   2603 
   2604 
   2605 template <class T>
   2606 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
   2607   Register vector_register = ToRegister(instr->temp_vector());
   2608   Register slot_register = ToRegister(instr->temp_slot());
   2609 
   2610   AllowDeferredHandleDereference vector_structure_check;
   2611   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   2612   __ li(vector_register, vector);
   2613   FeedbackVectorSlot slot = instr->hydrogen()->slot();
   2614   int index = vector->GetIndex(slot);
   2615   __ li(slot_register, Operand(Smi::FromInt(index)));
   2616 }
   2617 
   2618 
   2619 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   2620   DCHECK(ToRegister(instr->context()).is(cp));
   2621   DCHECK(ToRegister(instr->global_object())
   2622              .is(LoadDescriptor::ReceiverRegister()));
   2623   DCHECK(ToRegister(instr->result()).is(v0));
   2624 
   2625   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   2626   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
   2627   Handle<Code> ic =
   2628       CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
   2629                                          SLOPPY, PREMONOMORPHIC).code();
   2630   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2631 }
   2632 
   2633 
   2634 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2635   Register context = ToRegister(instr->context());
   2636   Register result = ToRegister(instr->result());
   2637 
   2638   __ lw(result, ContextMemOperand(context, instr->slot_index()));
   2639   if (instr->hydrogen()->RequiresHoleCheck()) {
   2640     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2641 
   2642     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2643       DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
   2644     } else {
   2645       Label is_not_hole;
   2646       __ Branch(&is_not_hole, ne, result, Operand(at));
   2647       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2648       __ bind(&is_not_hole);
   2649     }
   2650   }
   2651 }
   2652 
   2653 
   2654 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2655   Register context = ToRegister(instr->context());
   2656   Register value = ToRegister(instr->value());
   2657   Register scratch = scratch0();
   2658   MemOperand target = ContextMemOperand(context, instr->slot_index());
   2659 
   2660   Label skip_assignment;
   2661 
   2662   if (instr->hydrogen()->RequiresHoleCheck()) {
   2663     __ lw(scratch, target);
   2664     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2665 
   2666     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2667       DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
   2668     } else {
   2669       __ Branch(&skip_assignment, ne, scratch, Operand(at));
   2670     }
   2671   }
   2672 
   2673   __ sw(value, target);
   2674   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2675     SmiCheck check_needed =
   2676         instr->hydrogen()->value()->type().IsHeapObject()
   2677             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2678     __ RecordWriteContextSlot(context,
   2679                               target.offset(),
   2680                               value,
   2681                               scratch0(),
   2682                               GetRAState(),
   2683                               kSaveFPRegs,
   2684                               EMIT_REMEMBERED_SET,
   2685                               check_needed);
   2686   }
   2687 
   2688   __ bind(&skip_assignment);
   2689 }
   2690 
   2691 
   2692 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2693   HObjectAccess access = instr->hydrogen()->access();
   2694   int offset = access.offset();
   2695   Register object = ToRegister(instr->object());
   2696 
   2697   if (access.IsExternalMemory()) {
   2698     Register result = ToRegister(instr->result());
   2699     MemOperand operand = MemOperand(object, offset);
   2700     __ Load(result, operand, access.representation());
   2701     return;
   2702   }
   2703 
   2704   if (instr->hydrogen()->representation().IsDouble()) {
   2705     DoubleRegister result = ToDoubleRegister(instr->result());
   2706     __ ldc1(result, FieldMemOperand(object, offset));
   2707     return;
   2708   }
   2709 
   2710   Register result = ToRegister(instr->result());
   2711   if (!access.IsInobject()) {
   2712     __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   2713     object = result;
   2714   }
   2715   MemOperand operand = FieldMemOperand(object, offset);
   2716   __ Load(result, operand, access.representation());
   2717 }
   2718 
   2719 
   2720 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   2721   DCHECK(ToRegister(instr->context()).is(cp));
   2722   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   2723   DCHECK(ToRegister(instr->result()).is(v0));
   2724 
   2725   // Name is always in a2.
   2726   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   2727   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
   2728   Handle<Code> ic =
   2729       CodeFactory::LoadICInOptimizedCode(
   2730           isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
   2731           instr->hydrogen()->initialization_state()).code();
   2732   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2733 }
   2734 
   2735 
   2736 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2737   Register scratch = scratch0();
   2738   Register function = ToRegister(instr->function());
   2739   Register result = ToRegister(instr->result());
   2740 
   2741   // Get the prototype or initial map from the function.
   2742   __ lw(result,
   2743          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2744 
   2745   // Check that the function has a prototype or an initial map.
   2746   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2747   DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
   2748 
   2749   // If the function does not have an initial map, we're done.
   2750   Label done;
   2751   __ GetObjectType(result, scratch, scratch);
   2752   __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
   2753 
   2754   // Get the prototype from the initial map.
   2755   __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2756 
   2757   // All done.
   2758   __ bind(&done);
   2759 }
   2760 
   2761 
   2762 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   2763   Register result = ToRegister(instr->result());
   2764   __ LoadRoot(result, instr->index());
   2765 }
   2766 
   2767 
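        // Load an element from the arguments area. The element is located at
        // arguments + (length - index + 1) * kPointerSize; e.g. with
        // length == 2 and index == 0 the load below reads from
        // MemOperand(arguments, 3 * kPointerSize).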
   2768 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   2769   Register arguments = ToRegister(instr->arguments());
   2770   Register result = ToRegister(instr->result());
   2771   // There are two words between the frame pointer and the last argument.
   2772   // Subtracting from length accounts for one of them; add one more.
   2773   if (instr->length()->IsConstantOperand()) {
   2774     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2775     if (instr->index()->IsConstantOperand()) {
   2776       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2777       int index = (const_length - const_index) + 1;
   2778       __ lw(result, MemOperand(arguments, index * kPointerSize));
   2779     } else {
   2780       Register index = ToRegister(instr->index());
   2781       __ li(at, Operand(const_length + 1));
   2782       __ Subu(result, at, index);
   2783       __ sll(at, result, kPointerSizeLog2);
   2784       __ Addu(at, arguments, at);
   2785       __ lw(result, MemOperand(at));
   2786     }
   2787   } else if (instr->index()->IsConstantOperand()) {
   2788     Register length = ToRegister(instr->length());
   2789     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2790     int loc = const_index - 1;
   2791     if (loc != 0) {
   2792       __ Subu(result, length, Operand(loc));
   2793       __ sll(at, result, kPointerSizeLog2);
   2794       __ Addu(at, arguments, at);
   2795       __ lw(result, MemOperand(at));
   2796     } else {
   2797       __ sll(at, length, kPointerSizeLog2);
   2798       __ Addu(at, arguments, at);
   2799       __ lw(result, MemOperand(at));
   2800     }
   2801   } else {
   2802     Register length = ToRegister(instr->length());
   2803     Register index = ToRegister(instr->index());
   2804     __ Subu(result, length, index);
   2805     __ Addu(result, result, 1);
   2806     __ sll(at, result, kPointerSizeLog2);
   2807     __ Addu(at, arguments, at);
   2808     __ lw(result, MemOperand(at));
   2809   }
   2810 }
   2811 
   2812 
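        // Keyed load from a typed array. Float loads go through the FPU
        // (float32 values are widened to double), while integer loads pick the
        // MIPS load instruction matching the element width and signedness.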
   2813 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2814   Register external_pointer = ToRegister(instr->elements());
   2815   Register key = no_reg;
   2816   ElementsKind elements_kind = instr->elements_kind();
   2817   bool key_is_constant = instr->key()->IsConstantOperand();
   2818   int constant_key = 0;
   2819   if (key_is_constant) {
   2820     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2821     if (constant_key & 0xF0000000) {
   2822       Abort(kArrayIndexConstantValueTooBig);
   2823     }
   2824   } else {
   2825     key = ToRegister(instr->key());
   2826   }
   2827   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   2828   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   2829       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   2830   int base_offset = instr->base_offset();
   2831 
   2832   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   2833     FPURegister result = ToDoubleRegister(instr->result());
   2834     if (key_is_constant) {
   2835       __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
   2836     } else {
   2837       __ sll(scratch0(), key, shift_size);
   2838       __ Addu(scratch0(), scratch0(), external_pointer);
   2839     }
   2840     if (elements_kind == FLOAT32_ELEMENTS) {
   2841       __ lwc1(result, MemOperand(scratch0(), base_offset));
   2842       __ cvt_d_s(result, result);
   2843     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
   2844       __ ldc1(result, MemOperand(scratch0(), base_offset));
   2845     }
   2846   } else {
   2847     Register result = ToRegister(instr->result());
   2848     MemOperand mem_operand = PrepareKeyedOperand(
   2849         key, external_pointer, key_is_constant, constant_key,
   2850         element_size_shift, shift_size, base_offset);
   2851     switch (elements_kind) {
   2852       case INT8_ELEMENTS:
   2853         __ lb(result, mem_operand);
   2854         break;
   2855       case UINT8_ELEMENTS:
   2856       case UINT8_CLAMPED_ELEMENTS:
   2857         __ lbu(result, mem_operand);
   2858         break;
   2859       case INT16_ELEMENTS:
   2860         __ lh(result, mem_operand);
   2861         break;
   2862       case UINT16_ELEMENTS:
   2863         __ lhu(result, mem_operand);
   2864         break;
   2865       case INT32_ELEMENTS:
   2866         __ lw(result, mem_operand);
   2867         break;
   2868       case UINT32_ELEMENTS:
   2869         __ lw(result, mem_operand);
   2870         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   2871           DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
   2872                        result, Operand(0x80000000));
   2873         }
   2874         break;
   2875       case FLOAT32_ELEMENTS:
   2876       case FLOAT64_ELEMENTS:
   2877       case FAST_DOUBLE_ELEMENTS:
   2878       case FAST_ELEMENTS:
   2879       case FAST_SMI_ELEMENTS:
   2880       case FAST_HOLEY_DOUBLE_ELEMENTS:
   2881       case FAST_HOLEY_ELEMENTS:
   2882       case FAST_HOLEY_SMI_ELEMENTS:
   2883       case DICTIONARY_ELEMENTS:
   2884       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   2885       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   2886         UNREACHABLE();
   2887         break;
   2888     }
   2889   }
   2890 }
   2891 
   2892 
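        // Keyed load from a FixedDoubleArray. The hole is encoded as a NaN
        // with a distinguished upper word, so the optional hole check compares
        // the upper 32 bits of the loaded value against kHoleNanUpper32.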
   2893 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   2894   Register elements = ToRegister(instr->elements());
   2895   bool key_is_constant = instr->key()->IsConstantOperand();
   2896   Register key = no_reg;
   2897   DoubleRegister result = ToDoubleRegister(instr->result());
   2898   Register scratch = scratch0();
   2899 
   2900   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   2901 
   2902   int base_offset = instr->base_offset();
   2903   if (key_is_constant) {
   2904     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2905     if (constant_key & 0xF0000000) {
   2906       Abort(kArrayIndexConstantValueTooBig);
   2907     }
   2908     base_offset += constant_key * kDoubleSize;
   2909   }
   2910   __ Addu(scratch, elements, Operand(base_offset));
   2911 
   2912   if (!key_is_constant) {
   2913     key = ToRegister(instr->key());
   2914     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   2915         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   2916     __ sll(at, key, shift_size);
   2917     __ Addu(scratch, scratch, at);
   2918   }
   2919 
   2920   __ ldc1(result, MemOperand(scratch));
   2921 
   2922   if (instr->hydrogen()->RequiresHoleCheck()) {
   2923     __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
   2924     DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
   2925                  Operand(kHoleNanUpper32));
   2926   }
   2927 }
   2928 
   2929 
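        // Keyed load from a FixedArray of tagged values. Smi-kind arrays deopt
        // on non-smi results, other kinds either deopt on the hole or convert
        // it to undefined; stubs additionally verify the array protector cell
        // before the conversion.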
   2930 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   2931   Register elements = ToRegister(instr->elements());
   2932   Register result = ToRegister(instr->result());
   2933   Register scratch = scratch0();
   2934   Register store_base = scratch;
   2935   int offset = instr->base_offset();
   2936 
   2937   if (instr->key()->IsConstantOperand()) {
   2938     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   2939     offset += ToInteger32(const_operand) * kPointerSize;
   2940     store_base = elements;
   2941   } else {
   2942     Register key = ToRegister(instr->key());
   2943     // Even though the HLoadKeyed instruction forces the input
   2944     // representation for the key to be an integer, the input gets replaced
   2945     // during bounds check elimination with the index argument to the bounds
   2946     // check, which can be tagged, so that case must be handled here, too.
   2947     if (instr->hydrogen()->key()->representation().IsSmi()) {
   2948       __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
   2949       __ addu(scratch, elements, scratch);
   2950     } else {
   2951       __ sll(scratch, key, kPointerSizeLog2);
   2952       __ addu(scratch, elements, scratch);
   2953     }
   2954   }
   2955   __ lw(result, MemOperand(store_base, offset));
   2956 
   2957   // Check for the hole value.
   2958   if (instr->hydrogen()->RequiresHoleCheck()) {
   2959     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   2960       __ SmiTst(result, scratch);
   2961       DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
   2962                    Operand(zero_reg));
   2963     } else {
   2964       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   2965       DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
   2966     }
   2967   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   2968     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   2969     Label done;
   2970     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   2971     __ Branch(&done, ne, result, Operand(scratch));
   2972     if (info()->IsStub()) {
   2973       // A stub can safely convert the hole to undefined only if the array
   2974       // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
   2975       // it needs to bail out.
   2976       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   2977       __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
   2978       DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
   2979                    Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
   2980     }
   2981     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2982     __ bind(&done);
   2983   }
   2984 }
   2985 
   2986 
   2987 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   2988   if (instr->is_fixed_typed_array()) {
   2989     DoLoadKeyedExternalArray(instr);
   2990   } else if (instr->hydrogen()->representation().IsDouble()) {
   2991     DoLoadKeyedFixedDoubleArray(instr);
   2992   } else {
   2993     DoLoadKeyedFixedArray(instr);
   2994   }
   2995 }
   2996 
   2997 
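        // Compute the MemOperand for a keyed access. A shift_size of -1 arises
        // for a smi key (already multiplied by two by the tag) indexing
        // one-byte elements, e.g. shift_size == 0 - kSmiTagSize == -1, so the
        // key is shifted right by one instead of left.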
   2998 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   2999                                          Register base,
   3000                                          bool key_is_constant,
   3001                                          int constant_key,
   3002                                          int element_size,
   3003                                          int shift_size,
   3004                                          int base_offset) {
   3005   if (key_is_constant) {
   3006     return MemOperand(base, (constant_key << element_size) + base_offset);
   3007   }
   3008 
   3009   if (base_offset == 0) {
   3010     if (shift_size >= 0) {
   3011       __ sll(scratch0(), key, shift_size);
   3012       __ Addu(scratch0(), base, scratch0());
   3013       return MemOperand(scratch0());
   3014     } else {
   3015       DCHECK_EQ(-1, shift_size);
   3016       __ srl(scratch0(), key, 1);
   3017       __ Addu(scratch0(), base, scratch0());
   3018       return MemOperand(scratch0());
   3019     }
   3020   }
   3021 
   3022   if (shift_size >= 0) {
   3023     __ sll(scratch0(), key, shift_size);
   3024     __ Addu(scratch0(), base, scratch0());
   3025     return MemOperand(scratch0(), base_offset);
   3026   } else {
   3027     DCHECK_EQ(-1, shift_size);
   3028     __ sra(scratch0(), key, 1);
   3029     __ Addu(scratch0(), base, scratch0());
   3030     return MemOperand(scratch0(), base_offset);
   3031   }
   3032 }
   3033 
   3034 
   3035 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3036   DCHECK(ToRegister(instr->context()).is(cp));
   3037   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   3038   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
   3039 
   3040   if (instr->hydrogen()->HasVectorAndSlot()) {
   3041     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
   3042   }
   3043 
   3044   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
   3045                         isolate(), instr->hydrogen()->language_mode(),
   3046                         instr->hydrogen()->initialization_state()).code();
   3047   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3048 }
   3049 
   3050 
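        // Materialize a pointer to the arguments area: a fixed offset from sp
        // for inlined frames; otherwise the current frame pointer, or the
        // arguments adaptor frame's pointer if one is present, selected
        // branchlessly with Movn/Movz.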
   3051 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3052   Register scratch = scratch0();
   3053   Register temp = scratch1();
   3054   Register result = ToRegister(instr->result());
   3055 
   3056   if (instr->hydrogen()->from_inlined()) {
   3057     __ Subu(result, sp, 2 * kPointerSize);
   3058   } else {
   3059     // Check if the calling frame is an arguments adaptor frame.
   3060     Label done, adapted;
   3061     __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3062     __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
   3063     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3064 
   3065     // Result is the frame pointer for the frame if not adapted and for the real
   3066     // frame below the adaptor frame if adapted.
   3067     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
   3068     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
   3069   }
   3070 }
   3071 
   3072 
   3073 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3074   Register elem = ToRegister(instr->elements());
   3075   Register result = ToRegister(instr->result());
   3076 
   3077   Label done;
   3078 
   3079   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3080   __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
   3081   __ Branch(&done, eq, fp, Operand(elem));
   3082 
   3083   // Arguments adaptor frame present. Get argument length from there.
   3084   __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3085   __ lw(result,
   3086         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3087   __ SmiUntag(result);
   3088 
   3089   // Argument length is in result register.
   3090   __ bind(&done);
   3091 }
   3092 
   3093 
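        // Implement receiver wrapping for sloppy-mode calls: null and
        // undefined are replaced with the global proxy, other non-JSReceiver
        // values deoptimize, and strict-mode or native functions receive the
        // value unchanged.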
   3094 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3095   Register receiver = ToRegister(instr->receiver());
   3096   Register function = ToRegister(instr->function());
   3097   Register result = ToRegister(instr->result());
   3098   Register scratch = scratch0();
   3099 
   3100   // If the receiver is null or undefined, we have to pass the global
   3101   // object as a receiver to normal functions. Values have to be
   3102   // passed unchanged to builtins and strict-mode functions.
   3103   Label global_object, result_in_receiver;
   3104 
   3105   if (!instr->hydrogen()->known_function()) {
   3106     // Do not transform the receiver to object for strict mode
   3107     // functions.
   3108     __ lw(scratch,
   3109            FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3110     __ lw(scratch,
   3111            FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3112 
   3113     // Do not transform the receiver to object for builtins.
   3114     int32_t strict_mode_function_mask =
   3115         1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
   3116     int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
   3117     __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
   3118     __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
   3119   }
   3120 
   3121   // Normal function. Replace undefined or null with global receiver.
   3122   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3123   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3124   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3125   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3126 
   3127   // Deoptimize if the receiver is not a JS object.
   3128   __ SmiTst(receiver, scratch);
   3129   DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
   3130 
   3131   __ GetObjectType(receiver, scratch, scratch);
   3132   DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
   3133                Operand(FIRST_JS_RECEIVER_TYPE));
   3134 
   3135   __ Branch(&result_in_receiver);
   3136   __ bind(&global_object);
   3137   __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3138   __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
   3139   __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
   3140 
   3141   if (result.is(receiver)) {
   3142     __ bind(&result_in_receiver);
   3143   } else {
   3144     Label result_ok;
   3145     __ Branch(&result_ok);
   3146     __ bind(&result_in_receiver);
   3147     __ mov(result, receiver);
   3148     __ bind(&result_ok);
   3149   }
   3150 }
   3151 
   3152 
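        // Function.prototype.apply-style invocation: after checking the
        // argument count limit, copy the arguments from the (possibly adapted)
        // frame onto the stack in a short loop, then invoke the function.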
   3153 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3154   Register receiver = ToRegister(instr->receiver());
   3155   Register function = ToRegister(instr->function());
   3156   Register length = ToRegister(instr->length());
   3157   Register elements = ToRegister(instr->elements());
   3158   Register scratch = scratch0();
   3159   DCHECK(receiver.is(a0));  // Used for parameter count.
   3160   DCHECK(function.is(a1));  // Required by InvokeFunction.
   3161   DCHECK(ToRegister(instr->result()).is(v0));
   3162 
   3163   // Copy the arguments to this function possibly from the
   3164   // adaptor frame below it.
   3165   const uint32_t kArgumentsLimit = 1 * KB;
   3166   DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
   3167                Operand(kArgumentsLimit));
   3168 
   3169   // Push the receiver and use the register to keep the original
   3170   // number of arguments.
   3171   __ push(receiver);
   3172   __ Move(receiver, length);
   3173   // The arguments are located one pointer size past elements.
   3174   __ Addu(elements, elements, Operand(1 * kPointerSize));
   3175 
   3176   // Loop through the arguments pushing them onto the execution
   3177   // stack.
   3178   Label invoke, loop;
   3179   // length is a small non-negative integer, due to the test above.
   3180   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
   3181   __ sll(scratch, length, 2);
   3182   __ bind(&loop);
   3183   __ Addu(scratch, elements, scratch);
   3184   __ lw(scratch, MemOperand(scratch));
   3185   __ push(scratch);
   3186   __ Subu(length, length, Operand(1));
   3187   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
   3188   __ sll(scratch, length, 2);
   3189 
   3190   __ bind(&invoke);
   3191   DCHECK(instr->HasPointerMap());
   3192   LPointerMap* pointers = instr->pointer_map();
   3193   SafepointGenerator safepoint_generator(
   3194       this, pointers, Safepoint::kLazyDeopt);
   3195   // The number of arguments is stored in receiver which is a0, as expected
   3196   // by InvokeFunction.
   3197   ParameterCount actual(receiver);
   3198   __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
   3199                     safepoint_generator);
   3200 }
   3201 
   3202 
   3203 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3204   LOperand* argument = instr->value();
   3205   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3206     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3207   } else {
   3208     Register argument_reg = EmitLoadRegister(argument, at);
   3209     __ push(argument_reg);
   3210   }
   3211 }
   3212 
   3213 
   3214 void LCodeGen::DoDrop(LDrop* instr) {
   3215   __ Drop(instr->count());
   3216 }
   3217 
   3218 
   3219 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3220   Register result = ToRegister(instr->result());
   3221   __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3222 }
   3223 
   3224 
   3225 void LCodeGen::DoContext(LContext* instr) {
   3226   // If there is a non-return use, the context must be moved to a register.
   3227   Register result = ToRegister(instr->result());
   3228   if (info()->IsOptimizing()) {
   3229     __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3230   } else {
   3231     // If there is no frame, the context must be in cp.
   3232     DCHECK(result.is(cp));
   3233   }
   3234 }
   3235 
   3236 
   3237 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3238   DCHECK(ToRegister(instr->context()).is(cp));
   3239   __ li(scratch0(), instr->hydrogen()->pairs());
   3240   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   3241   __ Push(scratch0(), scratch1());
   3242   CallRuntime(Runtime::kDeclareGlobals, instr);
   3243 }
   3244 
   3245 
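        // Call a function known at compile time. When the formal parameter
        // count matches the arity (or the function opts out of adaptation) the
        // code entry is called directly; otherwise the call goes through
        // InvokeFunction, which handles argument adaptation.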
   3246 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3247                                  int formal_parameter_count, int arity,
   3248                                  LInstruction* instr) {
   3249   bool dont_adapt_arguments =
   3250       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3251   bool can_invoke_directly =
   3252       dont_adapt_arguments || formal_parameter_count == arity;
   3253 
   3254   Register function_reg = a1;
   3255   LPointerMap* pointers = instr->pointer_map();
   3256 
   3257   if (can_invoke_directly) {
   3258     // Change context.
   3259     __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   3260 
   3261     // Always initialize new target and number of actual arguments.
   3262     __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
   3263     __ li(a0, Operand(arity));
   3264 
   3265     // Invoke function.
   3266     __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   3267     __ Call(at);
   3268 
   3269     // Set up deoptimization.
   3270     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3271   } else {
   3272     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3273     ParameterCount count(arity);
   3274     ParameterCount expected(formal_parameter_count);
   3275     __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
   3276   }
   3277 }
   3278 
   3279 
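        // Deferred Math.abs for a tagged value: allocate a fresh HeapNumber
        // equal to the input but with the sign bit of the exponent word
        // cleared.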
   3280 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3281   DCHECK(instr->context() != NULL);
   3282   DCHECK(ToRegister(instr->context()).is(cp));
   3283   Register input = ToRegister(instr->value());
   3284   Register result = ToRegister(instr->result());
   3285   Register scratch = scratch0();
   3286 
   3287   // Deoptimize if not a heap number.
   3288   __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3289   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3290   DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
   3291 
   3292   Label done;
   3293   Register exponent = scratch0();
   3294   scratch = no_reg;
   3295   __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3296   // Check the sign of the argument. If the argument is positive, just
   3297   // return it.
   3298   __ Move(result, input);
   3299   __ And(at, exponent, Operand(HeapNumber::kSignMask));
   3300   __ Branch(&done, eq, at, Operand(zero_reg));
   3301 
   3302   // Input is negative. Reverse its sign.
   3303   // Preserve the value of all registers.
   3304   {
   3305     PushSafepointRegistersScope scope(this);
   3306 
   3307     // Registers were saved at the safepoint, so we can use
   3308     // many scratch registers.
   3309     Register tmp1 = input.is(a1) ? a0 : a1;
   3310     Register tmp2 = input.is(a2) ? a0 : a2;
   3311     Register tmp3 = input.is(a3) ? a0 : a3;
   3312     Register tmp4 = input.is(t0) ? a0 : t0;
   3313 
   3314     // exponent: floating point exponent value.
   3315 
   3316     Label allocated, slow;
   3317     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3318     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3319     __ Branch(&allocated);
   3320 
   3321     // Slow case: Call the runtime system to do the number allocation.
   3322     __ bind(&slow);
   3323 
   3324     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3325                             instr->context());
   3326     // Set the pointer to the new heap number in tmp1.
   3327     if (!tmp1.is(v0))
   3328       __ mov(tmp1, v0);
   3329     // Restore input after the call to the runtime.
   3330     __ LoadFromSafepointRegisterSlot(input, input);
   3331     __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3332 
   3333     __ bind(&allocated);
   3334     // exponent: floating point exponent value.
   3335     // tmp1: allocated heap number.
   3336     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
   3337     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3338     __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3339     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3340 
   3341     __ StoreToSafepointRegisterSlot(tmp1, result);
   3342   }
   3343 
   3344   __ bind(&done);
   3345 }
   3346 
   3347 
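        // Integer Math.abs: negate negative inputs and deoptimize on the one
        // value without a positive counterpart, kMinInt (0x80000000).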
   3348 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3349   Register input = ToRegister(instr->value());
   3350   Register result = ToRegister(instr->result());
   3351   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   3352   Label done;
   3353   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
   3354   __ mov(result, input);
   3355   __ subu(result, zero_reg, input);
   3356   // Overflow if result is still negative, i.e. 0x80000000.
   3357   DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
   3358   __ bind(&done);
   3359 }
   3360 
   3361 
   3362 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3363   // Class for deferred case.
   3364   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   3365    public:
   3366     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3367         : LDeferredCode(codegen), instr_(instr) { }
   3368     void Generate() override {
   3369       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3370     }
   3371     LInstruction* instr() override { return instr_; }
   3372 
   3373    private:
   3374     LMathAbs* instr_;
   3375   };
   3376 
   3377   Representation r = instr->hydrogen()->value()->representation();
   3378   if (r.IsDouble()) {
   3379     FPURegister input = ToDoubleRegister(instr->value());
   3380     FPURegister result = ToDoubleRegister(instr->result());
   3381     __ abs_d(result, input);
   3382   } else if (r.IsSmiOrInteger32()) {
   3383     EmitIntegerMathAbs(instr);
   3384   } else {
   3385     // Representation is tagged.
   3386     DeferredMathAbsTaggedHeapNumber* deferred =
   3387         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3388     Register input = ToRegister(instr->value());
   3389     // Smi check.
   3390     __ JumpIfNotSmi(input, deferred->entry());
   3391     // If smi, handle it directly.
   3392     EmitIntegerMathAbs(instr);
   3393     __ bind(deferred->exit());
   3394   }
   3395 }
   3396 
   3397 
   3398 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3399   DoubleRegister input = ToDoubleRegister(instr->value());
   3400   Register result = ToRegister(instr->result());
   3401   Register scratch1 = scratch0();
   3402   Register except_flag = ToRegister(instr->temp());
   3403 
   3404   __ EmitFPUTruncate(kRoundToMinusInf,
   3405                      result,
   3406                      input,
   3407                      scratch1,
   3408                      double_scratch0(),
   3409                      except_flag);
   3410 
   3411   // Deopt if the operation did not succeed.
   3412   DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
   3413                Operand(zero_reg));
   3414 
   3415   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3416     // Test for -0.
   3417     Label done;
   3418     __ Branch(&done, ne, result, Operand(zero_reg));
   3419     __ Mfhc1(scratch1, input);
   3420     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   3421     DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
   3422                  Operand(zero_reg));
   3423     __ bind(&done);
   3424   }
   3425 }
   3426 
   3427 
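        // Math.round implemented as floor(input + 0.5), with explicit exponent
        // handling so that tiny inputs produce +/-0, inputs outside
        // ]-2^32, 2^32[ deoptimize, and -0 results are caught when required.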
   3428 void LCodeGen::DoMathRound(LMathRound* instr) {
   3429   DoubleRegister input = ToDoubleRegister(instr->value());
   3430   Register result = ToRegister(instr->result());
   3431   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3432   Register scratch = scratch0();
   3433   Label done, check_sign_on_zero;
   3434 
   3435   // Extract exponent bits.
   3436   __ Mfhc1(result, input);
   3437   __ Ext(scratch,
   3438          result,
   3439          HeapNumber::kExponentShift,
   3440          HeapNumber::kExponentBits);
   3441 
   3442   // If the number is in ]-0.5, +0.5[, the result is +/- 0.
   3443   Label skip1;
   3444   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
   3445   __ mov(result, zero_reg);
   3446   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3447     __ Branch(&check_sign_on_zero);
   3448   } else {
   3449     __ Branch(&done);
   3450   }
   3451   __ bind(&skip1);
   3452 
   3453   // The following conversion will not work with numbers
   3454   // outside of ]-2^32, 2^32[.
   3455   DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
   3456                Operand(HeapNumber::kExponentBias + 32));
   3457 
   3458   // Save the original sign for later comparison.
   3459   __ And(scratch, result, Operand(HeapNumber::kSignMask));
   3460 
   3461   __ Move(double_scratch0(), 0.5);
   3462   __ add_d(double_scratch0(), input, double_scratch0());
   3463 
   3464   // Check sign of the result: if the sign changed, the input
   3465   // value was in ]-0.5, 0[ and the result should be -0.
   3466   __ Mfhc1(result, double_scratch0());
   3467   __ Xor(result, result, Operand(scratch));
   3468   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3469     // ARM uses 'mi' here, which is 'lt'
   3470     DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
   3471   } else {
   3472     Label skip2;
   3473     // ARM uses 'mi' here, which is 'lt'
   3474     // Negating it results in 'ge'
   3475     __ Branch(&skip2, ge, result, Operand(zero_reg));
   3476     __ mov(result, zero_reg);
   3477     __ Branch(&done);
   3478     __ bind(&skip2);
   3479   }
   3480 
   3481   Register except_flag = scratch;
   3482   __ EmitFPUTruncate(kRoundToMinusInf,
   3483                      result,
   3484                      double_scratch0(),
   3485                      at,
   3486                      double_scratch1,
   3487                      except_flag);
   3488 
   3489   DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
   3490                Operand(zero_reg));
   3491 
   3492   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3493     // Test for -0.
   3494     __ Branch(&done, ne, result, Operand(zero_reg));
   3495     __ bind(&check_sign_on_zero);
   3496     __ Mfhc1(scratch, input);
   3497     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
   3498     DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
   3499                  Operand(zero_reg));
   3500   }
   3501   __ bind(&done);
   3502 }
   3503 
   3504 
   3505 void LCodeGen::DoMathFround(LMathFround* instr) {
   3506   DoubleRegister input = ToDoubleRegister(instr->value());
   3507   DoubleRegister result = ToDoubleRegister(instr->result());
   3508   __ cvt_s_d(result.low(), input);
   3509   __ cvt_d_s(result, result.low());
   3510 }
   3511 
   3512 
   3513 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3514   DoubleRegister input = ToDoubleRegister(instr->value());
   3515   DoubleRegister result = ToDoubleRegister(instr->result());
   3516   __ sqrt_d(result, input);
   3517 }
   3518 
   3519 
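        // Math.pow(x, 0.5) is almost sqrt(x), except that per ES5 15.8.2.13
        // pow(-Infinity, 0.5) is +Infinity while sqrt(-Infinity) is NaN, so
        // -Infinity is special-cased before taking the square root.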
   3520 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3521   DoubleRegister input = ToDoubleRegister(instr->value());
   3522   DoubleRegister result = ToDoubleRegister(instr->result());
   3523   DoubleRegister temp = ToDoubleRegister(instr->temp());
   3524 
   3525   DCHECK(!input.is(result));
   3526 
   3527   // Note that according to ECMA-262 15.8.2.13:
   3528   // Math.pow(-Infinity, 0.5) == Infinity
   3529   // Math.sqrt(-Infinity) == NaN
   3530   Label done;
   3531   __ Move(temp, static_cast<double>(-V8_INFINITY));
   3532   __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
   3533   // Set up Infinity in the delay slot.
   3534   // result is overwritten if the branch is not taken.
   3535   __ neg_d(result, temp);
   3536 
   3537   // Add +0 to convert -0 to +0.
   3538   __ add_d(result, input, kDoubleRegZero);
   3539   __ sqrt_d(result, result);
   3540   __ bind(&done);
   3541 }
   3542 
   3543 
   3544 void LCodeGen::DoPower(LPower* instr) {
   3545   Representation exponent_type = instr->hydrogen()->right()->representation();
   3546   // Having marked this as a call, we can use any registers.
   3547   // Just make sure that the input/output registers are the expected ones.
   3548   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3549   DCHECK(!instr->right()->IsDoubleRegister() ||
   3550          ToDoubleRegister(instr->right()).is(f4));
   3551   DCHECK(!instr->right()->IsRegister() ||
   3552          ToRegister(instr->right()).is(tagged_exponent));
   3553   DCHECK(ToDoubleRegister(instr->left()).is(f2));
   3554   DCHECK(ToDoubleRegister(instr->result()).is(f0));
   3555 
   3556   if (exponent_type.IsSmi()) {
   3557     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3558     __ CallStub(&stub);
   3559   } else if (exponent_type.IsTagged()) {
   3560     Label no_deopt;
   3561     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3562     DCHECK(!t3.is(tagged_exponent));
   3563     __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
   3564     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3565     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
   3566     __ bind(&no_deopt);
   3567     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3568     __ CallStub(&stub);
   3569   } else if (exponent_type.IsInteger32()) {
   3570     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3571     __ CallStub(&stub);
   3572   } else {
   3573     DCHECK(exponent_type.IsDouble());
   3574     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3575     __ CallStub(&stub);
   3576   }
   3577 }
   3578 
   3579 
   3580 void LCodeGen::DoMathExp(LMathExp* instr) {
   3581   DoubleRegister input = ToDoubleRegister(instr->value());
   3582   DoubleRegister result = ToDoubleRegister(instr->result());
   3583   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
   3584   DoubleRegister double_scratch2 = double_scratch0();
   3585   Register temp1 = ToRegister(instr->temp1());
   3586   Register temp2 = ToRegister(instr->temp2());
   3587 
   3588   MathExpGenerator::EmitMathExp(
   3589       masm(), input, result, double_scratch1, double_scratch2,
   3590       temp1, temp2, scratch0());
   3591 }
   3592 
   3593 
   3594 void LCodeGen::DoMathLog(LMathLog* instr) {
   3595   __ PrepareCallCFunction(0, 1, scratch0());
   3596   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3597   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
   3598                    0, 1);
   3599   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3600 }
   3601 
   3602 
   3603 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3604   Register input = ToRegister(instr->value());
   3605   Register result = ToRegister(instr->result());
   3606   __ Clz(result, input);
   3607 }
   3608 
   3609 
   3610 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3611   DCHECK(ToRegister(instr->context()).is(cp));
   3612   DCHECK(ToRegister(instr->function()).is(a1));
   3613   DCHECK(instr->HasPointerMap());
   3614 
   3615   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3616   if (known_function.is_null()) {
   3617     LPointerMap* pointers = instr->pointer_map();
   3618     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3619     ParameterCount count(instr->arity());
   3620     __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
   3621   } else {
   3622     CallKnownFunction(known_function,
   3623                       instr->hydrogen()->formal_parameter_count(),
   3624                       instr->arity(), instr);
   3625   }
   3626 }
   3627 
   3628 
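        // Call through a call interface descriptor. Tail calls drop the
        // current frame (if any) and jump; regular calls record a safepoint
        // around the call via the SafepointGenerator.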
   3629 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3630   DCHECK(ToRegister(instr->result()).is(v0));
   3631 
   3632   if (instr->hydrogen()->IsTailCall()) {
   3633     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   3634 
   3635     if (instr->target()->IsConstantOperand()) {
   3636       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3637       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3638       __ Jump(code, RelocInfo::CODE_TARGET);
   3639     } else {
   3640       DCHECK(instr->target()->IsRegister());
   3641       Register target = ToRegister(instr->target());
   3642       __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3643       __ Jump(target);
   3644     }
   3645   } else {
   3646     LPointerMap* pointers = instr->pointer_map();
   3647     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3648 
   3649     if (instr->target()->IsConstantOperand()) {
   3650       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3651       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3652       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3653       __ Call(code, RelocInfo::CODE_TARGET);
   3654     } else {
   3655       DCHECK(instr->target()->IsRegister());
   3656       Register target = ToRegister(instr->target());
   3657       generator.BeforeCall(__ CallSize(target));
   3658       __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3659       __ Call(target);
   3660     }
   3661     generator.AfterCall();
   3662   }
   3663 }
   3664 
   3665 
   3666 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   3667   DCHECK(ToRegister(instr->function()).is(a1));
   3668   DCHECK(ToRegister(instr->result()).is(v0));
   3669 
   3670   // Change context.
   3671   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   3672 
   3673   // Always initialize new target and number of actual arguments.
   3674   __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
   3675   __ li(a0, Operand(instr->arity()));
   3676 
   3677   // Load the code entry address.
   3678   __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
   3679   __ Call(at);
   3680 
   3681   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3682 }
   3683 
   3684 
   3685 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3686   DCHECK(ToRegister(instr->context()).is(cp));
   3687   DCHECK(ToRegister(instr->function()).is(a1));
   3688   DCHECK(ToRegister(instr->result()).is(v0));
   3689 
   3690   int arity = instr->arity();
   3691   ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
   3692   if (instr->hydrogen()->HasVectorAndSlot()) {
   3693     Register slot_register = ToRegister(instr->temp_slot());
   3694     Register vector_register = ToRegister(instr->temp_vector());
   3695     DCHECK(slot_register.is(a3));
   3696     DCHECK(vector_register.is(a2));
   3697 
   3698     AllowDeferredHandleDereference vector_structure_check;
   3699     Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   3700     int index = vector->GetIndex(instr->hydrogen()->slot());
   3701 
   3702     __ li(vector_register, vector);
   3703     __ li(slot_register, Operand(Smi::FromInt(index)));
   3704 
   3705     Handle<Code> ic =
   3706         CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
   3707     CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3708   } else {
   3709     __ li(a0, Operand(arity));
   3710     CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
   3711   }
   3712 }
   3713 
   3714 
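        // Call the Array constructor through the specialized constructor
        // stubs, choosing the no-argument, single-argument (packed or holey),
        // or N-argument stub based on arity.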
   3715 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3716   DCHECK(ToRegister(instr->context()).is(cp));
   3717   DCHECK(ToRegister(instr->constructor()).is(a1));
   3718   DCHECK(ToRegister(instr->result()).is(v0));
   3719 
   3720   __ li(a0, Operand(instr->arity()));
   3721   if (instr->arity() == 1) {
   3722     // We only need the allocation site for the case we have a length argument.
   3723     // The case may bail out to the runtime, which will determine the correct
   3724     // elements kind with the site.
   3725     __ li(a2, instr->hydrogen()->site());
   3726   } else {
   3727     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
   3728   }
   3729   ElementsKind kind = instr->hydrogen()->elements_kind();
   3730   AllocationSiteOverrideMode override_mode =
   3731       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3732           ? DISABLE_ALLOCATION_SITES
   3733           : DONT_OVERRIDE;
   3734 
   3735   if (instr->arity() == 0) {
   3736     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3737     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3738   } else if (instr->arity() == 1) {
   3739     Label done;
   3740     if (IsFastPackedElementsKind(kind)) {
   3741       Label packed_case;
   3742       // We might need a holey elements kind;
   3743       // look at the first (length) argument.
   3744       __ lw(t1, MemOperand(sp, 0));
   3745       __ Branch(&packed_case, eq, t1, Operand(zero_reg));
   3746 
   3747       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   3748       ArraySingleArgumentConstructorStub stub(isolate(),
   3749                                               holey_kind,
   3750                                               override_mode);
   3751       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3752       __ jmp(&done);
   3753       __ bind(&packed_case);
   3754     }
   3755 
   3756     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   3757     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3758     __ bind(&done);
   3759   } else {
   3760     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
   3761     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3762   }
   3763 }
   3764 
   3765 
   3766 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3767   CallRuntime(instr->function(), instr->arity(), instr);
   3768 }
   3769 
   3770 
   3771 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   3772   Register function = ToRegister(instr->function());
   3773   Register code_object = ToRegister(instr->code_object());
   3774   __ Addu(code_object, code_object,
   3775           Operand(Code::kHeaderSize - kHeapObjectTag));
   3776   __ sw(code_object,
   3777         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   3778 }
   3779 
   3780 
   3781 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3782   Register result = ToRegister(instr->result());
   3783   Register base = ToRegister(instr->base_object());
   3784   if (instr->offset()->IsConstantOperand()) {
   3785     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   3786     __ Addu(result, base, Operand(ToInteger32(offset)));
   3787   } else {
   3788     Register offset = ToRegister(instr->offset());
   3789     __ Addu(result, base, offset);
   3790   }
   3791 }
   3792 
   3793 
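        // Store to a named field, mirroring DoLoadNamedField: external memory,
        // unboxed double, or a tagged in-object/properties store, with an
        // optional map transition and write barriers where required.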
   3794 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3795   Representation representation = instr->representation();
   3796 
   3797   Register object = ToRegister(instr->object());
   3798   Register scratch = scratch0();
   3799   HObjectAccess access = instr->hydrogen()->access();
   3800   int offset = access.offset();
   3801 
   3802   if (access.IsExternalMemory()) {
   3803     Register value = ToRegister(instr->value());
   3804     MemOperand operand = MemOperand(object, offset);
   3805     __ Store(value, operand, representation);
   3806     return;
   3807   }
   3808 
   3809   __ AssertNotSmi(object);
   3810 
   3811   DCHECK(!representation.IsSmi() ||
   3812          !instr->value()->IsConstantOperand() ||
   3813          IsSmi(LConstantOperand::cast(instr->value())));
   3814   if (representation.IsDouble()) {
   3815     DCHECK(access.IsInobject());
   3816     DCHECK(!instr->hydrogen()->has_transition());
   3817     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   3818     DoubleRegister value = ToDoubleRegister(instr->value());
   3819     __ sdc1(value, FieldMemOperand(object, offset));
   3820     return;
   3821   }
   3822 
   3823   if (instr->hydrogen()->has_transition()) {
   3824     Handle<Map> transition = instr->hydrogen()->transition_map();
   3825     AddDeprecationDependency(transition);
   3826     __ li(scratch, Operand(transition));
   3827     __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   3828     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   3829       Register temp = ToRegister(instr->temp());
   3830       // Update the write barrier for the map field.
   3831       __ RecordWriteForMap(object,
   3832                            scratch,
   3833                            temp,
   3834                            GetRAState(),
   3835                            kSaveFPRegs);
   3836     }
   3837   }
   3838 
   3839   // Do the store.
   3840   Register value = ToRegister(instr->value());
   3841   if (access.IsInobject()) {
   3842     MemOperand operand = FieldMemOperand(object, offset);
   3843     __ Store(value, operand, representation);
   3844     if (instr->hydrogen()->NeedsWriteBarrier()) {
   3845       // Update the write barrier for the object for in-object properties.
   3846       __ RecordWriteField(object,
   3847                           offset,
   3848                           value,
   3849                           scratch,
   3850                           GetRAState(),
   3851                           kSaveFPRegs,
   3852                           EMIT_REMEMBERED_SET,
   3853                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   3854                           instr->hydrogen()->PointersToHereCheckForValue());
   3855     }
   3856   } else {
   3857     __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3858     MemOperand operand = FieldMemOperand(scratch, offset);
   3859     __ Store(value, operand, representation);
   3860     if (instr->hydrogen()->NeedsWriteBarrier()) {
   3861       // Update the write barrier for the properties array.
   3862       // object is used as a scratch register.
   3863       __ RecordWriteField(scratch,
   3864                           offset,
   3865                           value,
   3866                           object,
   3867                           GetRAState(),
   3868                           kSaveFPRegs,
   3869                           EMIT_REMEMBERED_SET,
   3870                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   3871                           instr->hydrogen()->PointersToHereCheckForValue());
   3872     }
   3873   }
   3874 }
   3875 
   3876 
   3877 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   3878   DCHECK(ToRegister(instr->context()).is(cp));
   3879   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   3880   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   3881 
   3882   if (instr->hydrogen()->HasVectorAndSlot()) {
   3883     EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
   3884   }
   3885 
   3886   __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
   3887   Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
   3888                         isolate(), instr->language_mode(),
   3889                         instr->hydrogen()->initialization_state()).code();
   3890   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3891 }
   3892 
   3893 
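        // Bounds check: compare index against length, commuting the condition
        // when the index is the constant operand, and deoptimize when out of
        // range. Checks eliminated by the optimizer are re-verified under
        // --debug-code.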
   3894 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   3895   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   3896   Operand operand(0);
   3897   Register reg;
   3898   if (instr->index()->IsConstantOperand()) {
   3899     operand = ToOperand(instr->index());
   3900     reg = ToRegister(instr->length());
   3901     cc = CommuteCondition(cc);
   3902   } else {
   3903     reg = ToRegister(instr->index());
   3904     operand = ToOperand(instr->length());
   3905   }
   3906   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   3907     Label done;
   3908     __ Branch(&done, NegateCondition(cc), reg, operand);
   3909     __ stop("eliminated bounds check failed");
   3910     __ bind(&done);
   3911   } else {
   3912     DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
   3913   }
   3914 }
   3915 
   3916 
   3917 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   3918   Register external_pointer = ToRegister(instr->elements());
   3919   Register key = no_reg;
   3920   ElementsKind elements_kind = instr->elements_kind();
   3921   bool key_is_constant = instr->key()->IsConstantOperand();
   3922   int constant_key = 0;
   3923   if (key_is_constant) {
   3924     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3925     if (constant_key & 0xF0000000) {
   3926       Abort(kArrayIndexConstantValueTooBig);
   3927     }
   3928   } else {
   3929     key = ToRegister(instr->key());
   3930   }
   3931   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3932   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3933       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3934   int base_offset = instr->base_offset();
   3935 
   3936   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   3937     Register address = scratch0();
   3938     FPURegister value(ToDoubleRegister(instr->value()));
   3939     if (key_is_constant) {
   3940       if (constant_key != 0) {
   3941         __ Addu(address, external_pointer,
   3942                 Operand(constant_key << element_size_shift));
   3943       } else {
   3944         address = external_pointer;
   3945       }
   3946     } else {
   3947       __ sll(address, key, shift_size);
   3948       __ Addu(address, external_pointer, address);
   3949     }
   3950 
   3951     if (elements_kind == FLOAT32_ELEMENTS) {
   3952       __ cvt_s_d(double_scratch0(), value);
   3953       __ swc1(double_scratch0(), MemOperand(address, base_offset));
   3954     } else {  // Storing doubles, not floats.
   3955       __ sdc1(value, MemOperand(address, base_offset));
   3956     }
   3957   } else {
   3958     Register value(ToRegister(instr->value()));
   3959     MemOperand mem_operand = PrepareKeyedOperand(
   3960         key, external_pointer, key_is_constant, constant_key,
   3961         element_size_shift, shift_size,
   3962         base_offset);
   3963     switch (elements_kind) {
   3964       case UINT8_ELEMENTS:
   3965       case UINT8_CLAMPED_ELEMENTS:
   3966       case INT8_ELEMENTS:
   3967         __ sb(value, mem_operand);
   3968         break;
   3969       case INT16_ELEMENTS:
   3970       case UINT16_ELEMENTS:
   3971         __ sh(value, mem_operand);
   3972         break;
   3973       case INT32_ELEMENTS:
   3974       case UINT32_ELEMENTS:
   3975         __ sw(value, mem_operand);
   3976         break;
   3977       case FLOAT32_ELEMENTS:
   3978       case FLOAT64_ELEMENTS:
   3979       case FAST_DOUBLE_ELEMENTS:
   3980       case FAST_ELEMENTS:
   3981       case FAST_SMI_ELEMENTS:
   3982       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3983       case FAST_HOLEY_ELEMENTS:
   3984       case FAST_HOLEY_SMI_ELEMENTS:
   3985       case DICTIONARY_ELEMENTS:
   3986       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   3987       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   3988         UNREACHABLE();
   3989         break;
   3990     }
   3991   }
   3992 }
   3993 
   3994 
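        // Keyed store into a FixedDoubleArray. When canonicalization is
        // required, NaN values are replaced with the canonical NaN constant
        // before being stored, so an arbitrary NaN bit pattern (which could
        // alias the hole encoding) is never written.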
   3995 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   3996   DoubleRegister value = ToDoubleRegister(instr->value());
   3997   Register elements = ToRegister(instr->elements());
   3998   Register scratch = scratch0();
   3999   Register scratch_1 = scratch1();
   4000   DoubleRegister double_scratch = double_scratch0();
   4001   bool key_is_constant = instr->key()->IsConstantOperand();
   4002   int base_offset = instr->base_offset();
   4003   Label not_nan, done;
   4004 
   4005   // Calculate the effective address of the slot in the array to store the
   4006   // double value.
   4007   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4008   if (key_is_constant) {
   4009     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4010     if (constant_key & 0xF0000000) {
   4011       Abort(kArrayIndexConstantValueTooBig);
   4012     }
   4013     __ Addu(scratch, elements,
   4014            Operand((constant_key << element_size_shift) + base_offset));
   4015   } else {
   4016     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4017         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4018     __ Addu(scratch, elements, Operand(base_offset));
   4019     __ sll(at, ToRegister(instr->key()), shift_size);
   4020     __ Addu(scratch, scratch, at);
   4021   }
   4022 
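  // Canonicalize NaNs before storing. Holey double backing stores reserve a
  // special NaN bit pattern to encode "the hole", so an arbitrary (e.g.
  // signalling) NaN must be replaced by the canonical NaN value.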
   4023   if (instr->NeedsCanonicalization()) {
   4024     Label is_nan;
   4025     // Check for NaN. All NaNs must be canonicalized.
   4026     __ BranchF(NULL, &is_nan, eq, value, value);
   4027     __ Branch(&not_nan);
   4028 
    // Only load the canonical NaN if the comparison above detected a NaN.
   4030     __ bind(&is_nan);
   4031     __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
   4032     __ ldc1(double_scratch,
   4033             FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
   4034     __ sdc1(double_scratch, MemOperand(scratch, 0));
   4035     __ Branch(&done);
   4036   }
   4037 
   4038   __ bind(&not_nan);
   4039   __ sdc1(value, MemOperand(scratch, 0));
   4040   __ bind(&done);
   4041 }
   4042 
   4043 
   4044 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4045   Register value = ToRegister(instr->value());
   4046   Register elements = ToRegister(instr->elements());
   4047   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
   4048       : no_reg;
   4049   Register scratch = scratch0();
   4050   Register store_base = scratch;
   4051   int offset = instr->base_offset();
   4052 
   4053   // Do the store.
   4054   if (instr->key()->IsConstantOperand()) {
   4055     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   4056     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4057     offset += ToInteger32(const_operand) * kPointerSize;
   4058     store_base = elements;
   4059   } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
   4064     if (instr->hydrogen()->key()->representation().IsSmi()) {
   4065       __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
   4066       __ addu(scratch, elements, scratch);
   4067     } else {
   4068       __ sll(scratch, key, kPointerSizeLog2);
   4069       __ addu(scratch, elements, scratch);
   4070     }
   4071   }
   4072   __ sw(value, MemOperand(store_base, offset));
   4073 
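  // If the stored value can be a heap object, the generational write barrier
  // must record the slot so that a new-space value referenced from an
  // old-space elements array is not missed by the GC.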
   4074   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4075     SmiCheck check_needed =
   4076         instr->hydrogen()->value()->type().IsHeapObject()
   4077             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4078     // Compute address of modified element and store it into key register.
   4079     __ Addu(key, store_base, Operand(offset));
   4080     __ RecordWrite(elements,
   4081                    key,
   4082                    value,
   4083                    GetRAState(),
   4084                    kSaveFPRegs,
   4085                    EMIT_REMEMBERED_SET,
   4086                    check_needed,
   4087                    instr->hydrogen()->PointersToHereCheckForValue());
   4088   }
   4089 }
   4090 
   4091 
   4092 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: external (typed) array, fast double array, or fast
  // tagged array.
   4094   if (instr->is_fixed_typed_array()) {
   4095     DoStoreKeyedExternalArray(instr);
   4096   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4097     DoStoreKeyedFixedDoubleArray(instr);
   4098   } else {
   4099     DoStoreKeyedFixedArray(instr);
   4100   }
   4101 }
   4102 
   4103 
   4104 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4105   DCHECK(ToRegister(instr->context()).is(cp));
   4106   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   4107   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   4108   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   4109 
   4110   if (instr->hydrogen()->HasVectorAndSlot()) {
   4111     EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
   4112   }
   4113 
   4114   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
   4115                         isolate(), instr->language_mode(),
   4116                         instr->hydrogen()->initialization_state()).code();
   4117   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4118 }
   4119 
   4120 
   4121 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4122   class DeferredMaybeGrowElements final : public LDeferredCode {
   4123    public:
   4124     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4125         : LDeferredCode(codegen), instr_(instr) {}
   4126     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4127     LInstruction* instr() override { return instr_; }
   4128 
   4129    private:
   4130     LMaybeGrowElements* instr_;
   4131   };
   4132 
   4133   Register result = v0;
   4134   DeferredMaybeGrowElements* deferred =
   4135       new (zone()) DeferredMaybeGrowElements(this, instr);
   4136   LOperand* key = instr->key();
   4137   LOperand* current_capacity = instr->current_capacity();
   4138 
   4139   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4140   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4141   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4142   DCHECK(current_capacity->IsConstantOperand() ||
   4143          current_capacity->IsRegister());
   4144 
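  // Grow (via the deferred code) whenever key >= current capacity. Each of
  // the four constant/register combinations gets its own comparison below.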
   4145   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4146     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4147     int32_t constant_capacity =
   4148         ToInteger32(LConstantOperand::cast(current_capacity));
   4149     if (constant_key >= constant_capacity) {
   4150       // Deferred case.
   4151       __ jmp(deferred->entry());
   4152     }
   4153   } else if (key->IsConstantOperand()) {
   4154     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4155     __ Branch(deferred->entry(), le, ToRegister(current_capacity),
   4156               Operand(constant_key));
   4157   } else if (current_capacity->IsConstantOperand()) {
   4158     int32_t constant_capacity =
   4159         ToInteger32(LConstantOperand::cast(current_capacity));
   4160     __ Branch(deferred->entry(), ge, ToRegister(key),
   4161               Operand(constant_capacity));
   4162   } else {
   4163     __ Branch(deferred->entry(), ge, ToRegister(key),
   4164               Operand(ToRegister(current_capacity)));
   4165   }
   4166 
   4167   if (instr->elements()->IsRegister()) {
   4168     __ mov(result, ToRegister(instr->elements()));
   4169   } else {
   4170     __ lw(result, ToMemOperand(instr->elements()));
   4171   }
   4172 
   4173   __ bind(deferred->exit());
   4174 }
   4175 
   4176 
   4177 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4178   // TODO(3095996): Get rid of this. For now, we need to make the
   4179   // result register contain a valid pointer because it is already
   4180   // contained in the register pointer map.
   4181   Register result = v0;
   4182   __ mov(result, zero_reg);
   4183 
   4184   // We have to call a stub.
   4185   {
   4186     PushSafepointRegistersScope scope(this);
   4187     if (instr->object()->IsRegister()) {
   4188       __ mov(result, ToRegister(instr->object()));
   4189     } else {
   4190       __ lw(result, ToMemOperand(instr->object()));
   4191     }
   4192 
   4193     LOperand* key = instr->key();
   4194     if (key->IsConstantOperand()) {
   4195       __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
   4196     } else {
   4197       __ mov(a3, ToRegister(key));
   4198       __ SmiTag(a3);
   4199     }
   4200 
   4201     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
   4202                                instr->hydrogen()->kind());
   4203     __ mov(a0, result);
   4204     __ CallStub(&stub);
   4205     RecordSafepointWithLazyDeopt(
   4206         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4207     __ StoreToSafepointRegisterSlot(result, result);
   4208   }
   4209 
   4210   // Deopt on smi, which means the elements array changed to dictionary mode.
   4211   __ SmiTst(result, at);
   4212   DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
   4213 }
   4214 
   4215 
   4216 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4217   Register object_reg = ToRegister(instr->object());
   4218   Register scratch = scratch0();
   4219 
   4220   Handle<Map> from_map = instr->original_map();
   4221   Handle<Map> to_map = instr->transitioned_map();
   4222   ElementsKind from_kind = instr->from_kind();
   4223   ElementsKind to_kind = instr->to_kind();
   4224 
   4225   Label not_applicable;
   4226   __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4227   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
   4228 
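  // A simple transition only needs to swap in the new map (e.g. going from
  // FAST_SMI_ELEMENTS to FAST_ELEMENTS); anything else, such as smi-to-double,
  // must also convert the backing store and is left to the stub.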
   4229   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4230     Register new_map_reg = ToRegister(instr->new_map_temp());
   4231     __ li(new_map_reg, Operand(to_map));
   4232     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4233     // Write barrier.
   4234     __ RecordWriteForMap(object_reg,
   4235                          new_map_reg,
   4236                          scratch,
   4237                          GetRAState(),
   4238                          kDontSaveFPRegs);
   4239   } else {
   4240     DCHECK(object_reg.is(a0));
   4241     DCHECK(ToRegister(instr->context()).is(cp));
   4242     PushSafepointRegistersScope scope(this);
   4243     __ li(a1, Operand(to_map));
   4244     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
   4245     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
   4246     __ CallStub(&stub);
   4247     RecordSafepointWithRegisters(
   4248         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   4249   }
   4250   __ bind(&not_applicable);
   4251 }
   4252 
   4253 
   4254 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4255   Register object = ToRegister(instr->object());
   4256   Register temp = ToRegister(instr->temp());
   4257   Label no_memento_found;
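  // Note: both label arguments are no_memento_found on purpose. The helper
  // branches there when the word before the array is not an allocation
  // memento map (condition ne); falling through therefore means a memento
  // was found, and we deoptimize unconditionally.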
   4258   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
   4259                                      ne, &no_memento_found);
   4260   DeoptimizeIf(al, instr);
   4261   __ bind(&no_memento_found);
   4262 }
   4263 
   4264 
   4265 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4266   DCHECK(ToRegister(instr->context()).is(cp));
   4267   DCHECK(ToRegister(instr->left()).is(a1));
   4268   DCHECK(ToRegister(instr->right()).is(a0));
   4269   StringAddStub stub(isolate(),
   4270                      instr->hydrogen()->flags(),
   4271                      instr->hydrogen()->pretenure_flag());
   4272   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4273 }
   4274 
   4275 
   4276 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4277   class DeferredStringCharCodeAt final : public LDeferredCode {
   4278    public:
   4279     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4280         : LDeferredCode(codegen), instr_(instr) { }
   4281     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4282     LInstruction* instr() override { return instr_; }
   4283 
   4284    private:
   4285     LStringCharCodeAt* instr_;
   4286   };
   4287 
   4288   DeferredStringCharCodeAt* deferred =
   4289       new(zone()) DeferredStringCharCodeAt(this, instr);
   4290   StringCharLoadGenerator::Generate(masm(),
   4291                                     ToRegister(instr->string()),
   4292                                     ToRegister(instr->index()),
   4293                                     ToRegister(instr->result()),
   4294                                     deferred->entry());
   4295   __ bind(deferred->exit());
   4296 }
   4297 
   4298 
   4299 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4300   Register string = ToRegister(instr->string());
   4301   Register result = ToRegister(instr->result());
   4302   Register scratch = scratch0();
   4303 
   4304   // TODO(3095996): Get rid of this. For now, we need to make the
   4305   // result register contain a valid pointer because it is already
   4306   // contained in the register pointer map.
   4307   __ mov(result, zero_reg);
   4308 
   4309   PushSafepointRegistersScope scope(this);
   4310   __ push(string);
   4311   // Push the index as a smi. This is safe because of the checks in
   4312   // DoStringCharCodeAt above.
   4313   if (instr->index()->IsConstantOperand()) {
   4314     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4315     __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
   4316     __ push(scratch);
   4317   } else {
   4318     Register index = ToRegister(instr->index());
   4319     __ SmiTag(index);
   4320     __ push(index);
   4321   }
   4322   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   4323                           instr->context());
   4324   __ AssertSmi(v0);
   4325   __ SmiUntag(v0);
   4326   __ StoreToSafepointRegisterSlot(v0, result);
   4327 }
   4328 
   4329 
   4330 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4331   class DeferredStringCharFromCode final : public LDeferredCode {
   4332    public:
   4333     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4334         : LDeferredCode(codegen), instr_(instr) { }
   4335     void Generate() override {
   4336       codegen()->DoDeferredStringCharFromCode(instr_);
   4337     }
   4338     LInstruction* instr() override { return instr_; }
   4339 
   4340    private:
   4341     LStringCharFromCode* instr_;
   4342   };
   4343 
   4344   DeferredStringCharFromCode* deferred =
   4345       new(zone()) DeferredStringCharFromCode(this, instr);
   4346 
   4347   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4348   Register char_code = ToRegister(instr->char_code());
   4349   Register result = ToRegister(instr->result());
   4350   Register scratch = scratch0();
   4351   DCHECK(!char_code.is(result));
   4352 
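  // Fast path: one-byte char codes are looked up in the single character
  // string cache; a miss (undefined entry) falls back to the deferred code.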
   4353   __ Branch(deferred->entry(), hi,
   4354             char_code, Operand(String::kMaxOneByteCharCode));
   4355   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4356   __ sll(scratch, char_code, kPointerSizeLog2);
   4357   __ Addu(result, result, scratch);
   4358   __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4359   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   4360   __ Branch(deferred->entry(), eq, result, Operand(scratch));
   4361   __ bind(deferred->exit());
   4362 }
   4363 
   4364 
   4365 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4366   Register char_code = ToRegister(instr->char_code());
   4367   Register result = ToRegister(instr->result());
   4368 
   4369   // TODO(3095996): Get rid of this. For now, we need to make the
   4370   // result register contain a valid pointer because it is already
   4371   // contained in the register pointer map.
   4372   __ mov(result, zero_reg);
   4373 
   4374   PushSafepointRegistersScope scope(this);
   4375   __ SmiTag(char_code);
   4376   __ push(char_code);
   4377   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   4378                           instr->context());
   4379   __ StoreToSafepointRegisterSlot(v0, result);
   4380 }
   4381 
   4382 
   4383 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4384   LOperand* input = instr->value();
   4385   DCHECK(input->IsRegister() || input->IsStackSlot());
   4386   LOperand* output = instr->result();
   4387   DCHECK(output->IsDoubleRegister());
   4388   FPURegister single_scratch = double_scratch0().low();
   4389   if (input->IsStackSlot()) {
   4390     Register scratch = scratch0();
   4391     __ lw(scratch, ToMemOperand(input));
   4392     __ mtc1(scratch, single_scratch);
   4393   } else {
   4394     __ mtc1(ToRegister(input), single_scratch);
   4395   }
   4396   __ cvt_d_w(ToDoubleRegister(output), single_scratch);
   4397 }
   4398 
   4399 
   4400 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4401   LOperand* input = instr->value();
   4402   LOperand* output = instr->result();
   4403 
   4404   __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
   4405 }
   4406 
   4407 
   4408 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4409   class DeferredNumberTagI final : public LDeferredCode {
   4410    public:
   4411     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4412         : LDeferredCode(codegen), instr_(instr) { }
   4413     void Generate() override {
   4414       codegen()->DoDeferredNumberTagIU(instr_,
   4415                                        instr_->value(),
   4416                                        instr_->temp1(),
   4417                                        instr_->temp2(),
   4418                                        SIGNED_INT32);
   4419     }
   4420     LInstruction* instr() override { return instr_; }
   4421 
   4422    private:
   4423     LNumberTagI* instr_;
   4424   };
   4425 
   4426   Register src = ToRegister(instr->value());
   4427   Register dst = ToRegister(instr->result());
   4428   Register overflow = scratch0();
   4429 
   4430   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
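  // Smi-tagging shifts the value left by one bit, which overflows for values
  // outside the 31-bit smi range; those are boxed as heap numbers in the
  // deferred code.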
   4431   __ SmiTagCheckOverflow(dst, src, overflow);
   4432   __ BranchOnOverflow(deferred->entry(), overflow);
   4433   __ bind(deferred->exit());
   4434 }
   4435 
   4436 
   4437 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4438   class DeferredNumberTagU final : public LDeferredCode {
   4439    public:
   4440     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4441         : LDeferredCode(codegen), instr_(instr) { }
   4442     void Generate() override {
   4443       codegen()->DoDeferredNumberTagIU(instr_,
   4444                                        instr_->value(),
   4445                                        instr_->temp1(),
   4446                                        instr_->temp2(),
   4447                                        UNSIGNED_INT32);
   4448     }
   4449     LInstruction* instr() override { return instr_; }
   4450 
   4451    private:
   4452     LNumberTagU* instr_;
   4453   };
   4454 
   4455   Register input = ToRegister(instr->value());
   4456   Register result = ToRegister(instr->result());
   4457 
   4458   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4459   __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
   4460   __ SmiTag(result, input);
   4461   __ bind(deferred->exit());
   4462 }
   4463 
   4464 
   4465 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
   4466                                      LOperand* value,
   4467                                      LOperand* temp1,
   4468                                      LOperand* temp2,
   4469                                      IntegerSignedness signedness) {
   4470   Label done, slow;
   4471   Register src = ToRegister(value);
   4472   Register dst = ToRegister(instr->result());
   4473   Register tmp1 = scratch0();
   4474   Register tmp2 = ToRegister(temp1);
   4475   Register tmp3 = ToRegister(temp2);
   4476   DoubleRegister dbl_scratch = double_scratch0();
   4477 
   4478   if (signedness == SIGNED_INT32) {
   4479     // There was overflow, so bits 30 and 31 of the original integer
   4480     // disagree. Try to allocate a heap number in new space and store
   4481     // the value in there. If that fails, call the runtime system.
   4482     if (dst.is(src)) {
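      // The tagging destroyed the input, so recover it: after the arithmetic
      // untag, bit 31 holds a copy of bit 30, and since overflow means the
      // original bits 30 and 31 disagreed, flipping bit 31 restores the
      // original value.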
   4483       __ SmiUntag(src, dst);
   4484       __ Xor(src, src, Operand(0x80000000));
   4485     }
   4486     __ mtc1(src, dbl_scratch);
   4487     __ cvt_d_w(dbl_scratch, dbl_scratch);
   4488   } else {
   4489     __ Cvt_d_uw(dbl_scratch, src, f22);
   4490   }
   4491 
   4492   if (FLAG_inline_new) {
   4493     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4494     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
   4495     __ Branch(&done);
   4496   }
   4497 
   4498   // Slow case: Call the runtime system to do the number allocation.
   4499   __ bind(&slow);
   4500   {
   4501     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4502     // result register is stored, as this register is in the pointer map, but
   4503     // contains an integer value.
   4504     __ mov(dst, zero_reg);
   4505 
   4506     // Preserve the value of all registers.
   4507     PushSafepointRegistersScope scope(this);
   4508 
   4509     // NumberTagI and NumberTagD use the context from the frame, rather than
   4510     // the environment's HContext or HInlinedContext value.
   4511     // They only call Runtime::kAllocateHeapNumber.
   4512     // The corresponding HChange instructions are added in a phase that does
   4513     // not have easy access to the local context.
   4514     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4515     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4516     RecordSafepointWithRegisters(
   4517         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4518     __ Subu(v0, v0, kHeapObjectTag);
   4519     __ StoreToSafepointRegisterSlot(v0, dst);
   4520   }

  // Done. Store the value in dbl_scratch into the value field of the
  // allocated heap number.
   4525   __ bind(&done);
   4526   __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
   4527   __ Addu(dst, dst, kHeapObjectTag);
   4528 }
   4529 
   4530 
   4531 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4532   class DeferredNumberTagD final : public LDeferredCode {
   4533    public:
   4534     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4535         : LDeferredCode(codegen), instr_(instr) { }
   4536     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
   4537     LInstruction* instr() override { return instr_; }
   4538 
   4539    private:
   4540     LNumberTagD* instr_;
   4541   };
   4542 
   4543   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4544   Register scratch = scratch0();
   4545   Register reg = ToRegister(instr->result());
   4546   Register temp1 = ToRegister(instr->temp());
   4547   Register temp2 = ToRegister(instr->temp2());
   4548 
   4549   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4550   if (FLAG_inline_new) {
   4551     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
   4553     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
   4554                           DONT_TAG_RESULT);
   4555   } else {
   4556     __ Branch(deferred->entry());
   4557   }
   4558   __ bind(deferred->exit());
   4559   __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
   4561   __ Addu(reg, reg, kHeapObjectTag);
   4562 }
   4563 
   4564 
   4565 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4566   // TODO(3095996): Get rid of this. For now, we need to make the
   4567   // result register contain a valid pointer because it is already
   4568   // contained in the register pointer map.
   4569   Register reg = ToRegister(instr->result());
   4570   __ mov(reg, zero_reg);
   4571 
   4572   PushSafepointRegistersScope scope(this);
   4573   // NumberTagI and NumberTagD use the context from the frame, rather than
   4574   // the environment's HContext or HInlinedContext value.
   4575   // They only call Runtime::kAllocateHeapNumber.
   4576   // The corresponding HChange instructions are added in a phase that does
   4577   // not have easy access to the local context.
   4578   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4579   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4580   RecordSafepointWithRegisters(
   4581       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4582   __ Subu(v0, v0, kHeapObjectTag);
   4583   __ StoreToSafepointRegisterSlot(v0, reg);
   4584 }
   4585 
   4586 
   4587 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4588   HChange* hchange = instr->hydrogen();
   4589   Register input = ToRegister(instr->value());
   4590   Register output = ToRegister(instr->result());
   4591   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4592       hchange->value()->CheckFlag(HValue::kUint32)) {
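    // A uint32 value fits in a smi only if it is below 2^30, i.e. if its two
    // topmost bits (mask 0xc0000000) are clear.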
   4593     __ And(at, input, Operand(0xc0000000));
   4594     DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
   4595   }
   4596   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4597       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4598     __ SmiTagCheckOverflow(output, input, at);
   4599     DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
   4600   } else {
   4601     __ SmiTag(output, input);
   4602   }
   4603 }
   4604 
   4605 
   4606 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4607   Register scratch = scratch0();
   4608   Register input = ToRegister(instr->value());
   4609   Register result = ToRegister(instr->result());
   4610   if (instr->needs_check()) {
   4611     STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
   4613     __ And(scratch, input, Operand(kHeapObjectTag));
   4614     __ SmiUntag(result, input);
   4615     DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
   4616   } else {
   4617     __ SmiUntag(result, input);
   4618   }
   4619 }
   4620 
   4621 
   4622 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4623                                 DoubleRegister result_reg,
   4624                                 NumberUntagDMode mode) {
   4625   bool can_convert_undefined_to_nan =
   4626       instr->hydrogen()->can_convert_undefined_to_nan();
   4627   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4628 
   4629   Register scratch = scratch0();
   4630   Label convert, load_smi, done;
   4631   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4632     // Smi check.
   4633     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4634     // Heap number map check.
   4635     __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4636     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   4637     if (can_convert_undefined_to_nan) {
   4638       __ Branch(&convert, ne, scratch, Operand(at));
   4639     } else {
   4640       DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
   4641                    Operand(at));
   4642     }
   4643     // Load heap number.
   4644     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
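    // -0.0 has an all-zero low word and only the sign bit set in the high
    // word, so it is detected by checking those two words separately.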
   4645     if (deoptimize_on_minus_zero) {
   4646       __ mfc1(at, result_reg.low());
   4647       __ Branch(&done, ne, at, Operand(zero_reg));
   4648       __ Mfhc1(scratch, result_reg);
   4649       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
   4650                    Operand(HeapNumber::kSignMask));
   4651     }
   4652     __ Branch(&done);
   4653     if (can_convert_undefined_to_nan) {
   4654       __ bind(&convert);
   4655       // Convert undefined (and hole) to NaN.
   4656       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4657       DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
   4658                    Operand(at));
   4659       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4660       __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4661       __ Branch(&done);
   4662     }
   4663   } else {
   4664     __ SmiUntag(scratch, input_reg);
   4665     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4666   }
   4667   // Smi to double register conversion
   4668   __ bind(&load_smi);
   4669   // scratch: untagged value of input_reg
   4670   __ mtc1(scratch, result_reg);
   4671   __ cvt_d_w(result_reg, result_reg);
   4672   __ bind(&done);
   4673 }
   4674 
   4675 
   4676 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4677   Register input_reg = ToRegister(instr->value());
   4678   Register scratch1 = scratch0();
   4679   Register scratch2 = ToRegister(instr->temp());
   4680   DoubleRegister double_scratch = double_scratch0();
   4681   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4682 
   4683   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4684   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4685 
   4686   Label done;
   4687 
   4688   // The input is a tagged HeapObject.
   4689   // Heap number map check.
   4690   __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4691   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // The 'at' value (the heap number map) and the scratch1 map value are used
  // for tests in both clauses of the if below.
   4694 
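  // Truncating conversions (as used by the JS bitwise operators) also accept
  // the oddballs undefined/true/false; non-truncating conversions deoptimize
  // unless the double converts to an int32 without loss of precision.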
   4695   if (instr->truncating()) {
   4696     // Performs a truncating conversion of a floating point number as used by
   4697     // the JS bitwise operations.
   4698     Label no_heap_number, check_bools, check_false;
   4699     // Check HeapNumber map.
   4700     __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
   4701     __ mov(scratch2, input_reg);  // In delay slot.
   4702     __ TruncateHeapNumberToI(input_reg, scratch2);
   4703     __ Branch(&done);
   4704 
   4705     // Check for Oddballs. Undefined/False is converted to zero and True to one
   4706     // for truncating conversions.
   4707     __ bind(&no_heap_number);
   4708     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4709     __ Branch(&check_bools, ne, input_reg, Operand(at));
   4710     DCHECK(ToRegister(instr->result()).is(input_reg));
   4711     __ Branch(USE_DELAY_SLOT, &done);
   4712     __ mov(input_reg, zero_reg);  // In delay slot.
   4713 
   4714     __ bind(&check_bools);
   4715     __ LoadRoot(at, Heap::kTrueValueRootIndex);
   4716     __ Branch(&check_false, ne, scratch2, Operand(at));
   4717     __ Branch(USE_DELAY_SLOT, &done);
   4718     __ li(input_reg, Operand(1));  // In delay slot.
   4719 
   4720     __ bind(&check_false);
   4721     __ LoadRoot(at, Heap::kFalseValueRootIndex);
   4722     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
   4723                  scratch2, Operand(at));
   4724     __ Branch(USE_DELAY_SLOT, &done);
   4725     __ mov(input_reg, zero_reg);  // In delay slot.
   4726   } else {
   4727     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
   4728                  Operand(at));
   4729 
   4730     // Load the double value.
   4731     __ ldc1(double_scratch,
   4732             FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4733 
   4734     Register except_flag = scratch2;
   4735     __ EmitFPUTruncate(kRoundToZero,
   4736                        input_reg,
   4737                        double_scratch,
   4738                        scratch1,
   4739                        double_scratch2,
   4740                        except_flag,
   4741                        kCheckForInexactConversion);
   4742 
   4743     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
   4744                  Operand(zero_reg));
   4745 
   4746     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4747       __ Branch(&done, ne, input_reg, Operand(zero_reg));
   4748 
   4749       __ Mfhc1(scratch1, double_scratch);
   4750       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4751       DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
   4752                    Operand(zero_reg));
   4753     }
   4754   }
   4755   __ bind(&done);
   4756 }
   4757 
   4758 
   4759 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4760   class DeferredTaggedToI final : public LDeferredCode {
   4761    public:
   4762     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4763         : LDeferredCode(codegen), instr_(instr) { }
   4764     void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
   4765     LInstruction* instr() override { return instr_; }
   4766 
   4767    private:
   4768     LTaggedToI* instr_;
   4769   };
   4770 
   4771   LOperand* input = instr->value();
   4772   DCHECK(input->IsRegister());
   4773   DCHECK(input->Equals(instr->result()));
   4774 
   4775   Register input_reg = ToRegister(input);
   4776 
   4777   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4778     __ SmiUntag(input_reg);
   4779   } else {
   4780     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4781 
   4782     // Let the deferred code handle the HeapObject case.
   4783     __ JumpIfNotSmi(input_reg, deferred->entry());
   4784 
   4785     // Smi to int32 conversion.
   4786     __ SmiUntag(input_reg);
   4787     __ bind(deferred->exit());
   4788   }
   4789 }
   4790 
   4791 
   4792 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4793   LOperand* input = instr->value();
   4794   DCHECK(input->IsRegister());
   4795   LOperand* result = instr->result();
   4796   DCHECK(result->IsDoubleRegister());
   4797 
   4798   Register input_reg = ToRegister(input);
   4799   DoubleRegister result_reg = ToDoubleRegister(result);
   4800 
   4801   HValue* value = instr->hydrogen()->value();
   4802   NumberUntagDMode mode = value->representation().IsSmi()
   4803       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4804 
   4805   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   4806 }
   4807 
   4808 
   4809 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4810   Register result_reg = ToRegister(instr->result());
   4811   Register scratch1 = scratch0();
   4812   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4813 
   4814   if (instr->truncating()) {
   4815     __ TruncateDoubleToI(result_reg, double_input);
   4816   } else {
   4817     Register except_flag = LCodeGen::scratch1();
   4818 
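    // EmitFPUTruncate leaves a non-zero value in except_flag if the
    // conversion was inexact or otherwise raised an FPU exception cause bit.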
   4819     __ EmitFPUTruncate(kRoundToMinusInf,
   4820                        result_reg,
   4821                        double_input,
   4822                        scratch1,
   4823                        double_scratch0(),
   4824                        except_flag,
   4825                        kCheckForInexactConversion);
   4826 
   4827     // Deopt if the operation did not succeed (except_flag != 0).
   4828     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
   4829                  Operand(zero_reg));
   4830 
   4831     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4832       Label done;
   4833       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   4834       __ Mfhc1(scratch1, double_input);
   4835       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4836       DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
   4837                    Operand(zero_reg));
   4838       __ bind(&done);
   4839     }
   4840   }
   4841 }
   4842 
   4843 
   4844 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4845   Register result_reg = ToRegister(instr->result());
   4846   Register scratch1 = LCodeGen::scratch0();
   4847   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4848 
   4849   if (instr->truncating()) {
   4850     __ TruncateDoubleToI(result_reg, double_input);
   4851   } else {
   4852     Register except_flag = LCodeGen::scratch1();
   4853 
   4854     __ EmitFPUTruncate(kRoundToMinusInf,
   4855                        result_reg,
   4856                        double_input,
   4857                        scratch1,
   4858                        double_scratch0(),
   4859                        except_flag,
   4860                        kCheckForInexactConversion);
   4861 
   4862     // Deopt if the operation did not succeed (except_flag != 0).
   4863     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
   4864                  Operand(zero_reg));
   4865 
   4866     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4867       Label done;
   4868       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   4869       __ Mfhc1(scratch1, double_input);
   4870       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4871       DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
   4872                    Operand(zero_reg));
   4873       __ bind(&done);
   4874     }
   4875   }
   4876   __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
   4877   DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
   4878 }
   4879 
   4880 
   4881 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   4882   LOperand* input = instr->value();
   4883   __ SmiTst(ToRegister(input), at);
   4884   DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
   4885 }
   4886 
   4887 
   4888 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   4889   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4890     LOperand* input = instr->value();
   4891     __ SmiTst(ToRegister(input), at);
   4892     DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
   4893   }
   4894 }
   4895 
   4896 
   4897 void LCodeGen::DoCheckArrayBufferNotNeutered(
   4898     LCheckArrayBufferNotNeutered* instr) {
   4899   Register view = ToRegister(instr->view());
   4900   Register scratch = scratch0();
   4901 
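  // A detached (neutered) buffer has the WasNeutered bit set in its bit
  // field; any access through a view on it must deoptimize.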
   4902   __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   4903   __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   4904   __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
   4905   DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
   4906 }
   4907 
   4908 
   4909 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   4910   Register input = ToRegister(instr->value());
   4911   Register scratch = scratch0();
   4912 
   4913   __ GetObjectType(input, scratch, scratch);
   4914 
   4915   if (instr->hydrogen()->is_interval_check()) {
   4916     InstanceType first;
   4917     InstanceType last;
   4918     instr->hydrogen()->GetCheckInterval(&first, &last);
   4919 
   4920     // If there is only one type in the interval check for equality.
   4921     if (first == last) {
   4922       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
   4923                    Operand(first));
   4924     } else {
   4925       DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
   4926                    Operand(first));
   4927       // Omit check for the last type.
   4928       if (last != LAST_TYPE) {
   4929         DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
   4930                      Operand(last));
   4931       }
   4932     }
   4933   } else {
   4934     uint8_t mask;
   4935     uint8_t tag;
   4936     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   4937 
   4938     if (base::bits::IsPowerOfTwo32(mask)) {
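      // With a single-bit mask, tag == 0 means "bit must be clear" and
      // tag == mask means "bit must be set", so one And plus a test against
      // zero suffices.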
   4939       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
   4940       __ And(at, scratch, mask);
   4941       DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
   4942                    at, Operand(zero_reg));
   4943     } else {
   4944       __ And(scratch, scratch, Operand(mask));
   4945       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
   4946                    Operand(tag));
   4947     }
   4948   }
   4949 }
   4950 
   4951 
   4952 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   4953   Register reg = ToRegister(instr->value());
   4954   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   4955   AllowDeferredHandleDereference smi_check;
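  // A new-space object may be moved by the GC, so it cannot be embedded in
  // the code directly. Instead, embed a cell that the GC keeps up to date
  // and compare against the cell's current value.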
   4956   if (isolate()->heap()->InNewSpace(*object)) {
   4958     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   4959     __ li(at, Operand(cell));
   4960     __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
   4961     DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
   4962   } else {
   4963     DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
   4964   }
   4965 }
   4966 
   4967 
   4968 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   4969   {
   4970     PushSafepointRegistersScope scope(this);
   4971     __ push(object);
   4972     __ mov(cp, zero_reg);
   4973     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   4974     RecordSafepointWithRegisters(
   4975         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   4976     __ StoreToSafepointRegisterSlot(v0, scratch0());
   4977   }
   4978   __ SmiTst(scratch0(), at);
   4979   DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
   4980                Operand(zero_reg));
   4981 }
   4982 
   4983 
   4984 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   4985   class DeferredCheckMaps final : public LDeferredCode {
   4986    public:
   4987     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   4988         : LDeferredCode(codegen), instr_(instr), object_(object) {
   4989       SetExit(check_maps());
   4990     }
   4991     void Generate() override {
   4992       codegen()->DoDeferredInstanceMigration(instr_, object_);
   4993     }
   4994     Label* check_maps() { return &check_maps_; }
   4995     LInstruction* instr() override { return instr_; }
   4996 
   4997    private:
   4998     LCheckMaps* instr_;
   4999     Label check_maps_;
   5000     Register object_;
   5001   };
   5002 
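  // A stability check emits no code at all: it just registers a dependency
  // so that this code is deoptimized if one of the maps transitions.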
   5003   if (instr->hydrogen()->IsStabilityCheck()) {
   5004     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5005     for (int i = 0; i < maps->size(); ++i) {
   5006       AddStabilityDependency(maps->at(i).handle());
   5007     }
   5008     return;
   5009   }
   5010 
   5011   Register map_reg = scratch0();
   5012   LOperand* input = instr->value();
   5013   DCHECK(input->IsRegister());
   5014   Register reg = ToRegister(input);
   5015   __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   5016 
   5017   DeferredCheckMaps* deferred = NULL;
   5018   if (instr->hydrogen()->HasMigrationTarget()) {
   5019     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5020     __ bind(deferred->check_maps());
   5021   }
   5022 
   5023   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5024   Label success;
   5025   for (int i = 0; i < maps->size() - 1; i++) {
   5026     Handle<Map> map = maps->at(i).handle();
   5027     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
   5028   }
   5029   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5030   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
   5031   if (instr->hydrogen()->HasMigrationTarget()) {
   5032     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
   5033   } else {
   5034     DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
   5035   }
   5036 
   5037   __ bind(&success);
   5038 }
   5039 
   5040 
   5041 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5042   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5043   Register result_reg = ToRegister(instr->result());
   5044   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5045   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
   5046 }
   5047 
   5048 
   5049 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5050   Register unclamped_reg = ToRegister(instr->unclamped());
   5051   Register result_reg = ToRegister(instr->result());
   5052   __ ClampUint8(result_reg, unclamped_reg);
   5053 }
   5054 
   5055 
   5056 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5057   Register scratch = scratch0();
   5058   Register input_reg = ToRegister(instr->unclamped());
   5059   Register result_reg = ToRegister(instr->result());
   5060   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5061   Label is_smi, done, heap_number;
   5062 
   5063   // Both smi and heap number cases are handled.
   5064   __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
   5065 
   5066   // Check for heap number
   5067   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5068   __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
   5069 
   5070   // Check for undefined. Undefined is converted to zero for clamping
   5071   // conversions.
   5072   DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
   5073                Operand(factory()->undefined_value()));
   5074   __ mov(result_reg, zero_reg);
   5075   __ jmp(&done);
   5076 
   5077   // Heap number
   5078   __ bind(&heap_number);
   5079   __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
   5080                                              HeapNumber::kValueOffset));
   5081   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
   5082   __ jmp(&done);
   5083 
   5084   __ bind(&is_smi);
   5085   __ ClampUint8(result_reg, scratch);
   5086 
   5087   __ bind(&done);
   5088 }
   5089 
   5090 
   5091 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   5092   DoubleRegister value_reg = ToDoubleRegister(instr->value());
   5093   Register result_reg = ToRegister(instr->result());
   5094   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
   5095     __ FmoveHigh(result_reg, value_reg);
   5096   } else {
   5097     __ FmoveLow(result_reg, value_reg);
   5098   }
   5099 }
   5100 
   5101 
   5102 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
   5103   Register hi_reg = ToRegister(instr->hi());
   5104   Register lo_reg = ToRegister(instr->lo());
   5105   DoubleRegister result_reg = ToDoubleRegister(instr->result());
   5106   __ Move(result_reg, lo_reg, hi_reg);
   5107 }
   5108 
   5109 
   5110 void LCodeGen::DoAllocate(LAllocate* instr) {
   5111   class DeferredAllocate final : public LDeferredCode {
   5112    public:
   5113     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5114         : LDeferredCode(codegen), instr_(instr) { }
   5115     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
   5116     LInstruction* instr() override { return instr_; }
   5117 
   5118    private:
   5119     LAllocate* instr_;
   5120   };
   5121 
   5122   DeferredAllocate* deferred =
   5123       new(zone()) DeferredAllocate(this, instr);
   5124 
   5125   Register result = ToRegister(instr->result());
   5126   Register scratch = ToRegister(instr->temp1());
   5127   Register scratch2 = ToRegister(instr->temp2());
   5128 
   5129   // Allocate memory for the object.
   5130   AllocationFlags flags = TAG_OBJECT;
   5131   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5132     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5133   }
   5134   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5135     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5136     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5137   }
   5138   if (instr->size()->IsConstantOperand()) {
   5139     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5140     CHECK(size <= Page::kMaxRegularHeapObjectSize);
   5141     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5142   } else {
   5143     Register size = ToRegister(instr->size());
   5144     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5145   }
   5146 
   5147   __ bind(deferred->exit());
   5148 
   5149   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5150     STATIC_ASSERT(kHeapObjectTag == 1);
   5151     if (instr->size()->IsConstantOperand()) {
   5152       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5153       __ li(scratch, Operand(size - kHeapObjectTag));
   5154     } else {
   5155       __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
   5156     }
   5157     __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
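    // Fill the object with one-word fillers, from the end towards the start.
    // scratch counts down from size - kHeapObjectTag; on the last iteration
    // it is -kHeapObjectTag, which exactly cancels the tag bit in result.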
   5158     Label loop;
   5159     __ bind(&loop);
   5160     __ Subu(scratch, scratch, Operand(kPointerSize));
   5161     __ Addu(at, result, Operand(scratch));
   5162     __ sw(scratch2, MemOperand(at));
   5163     __ Branch(&loop, ge, scratch, Operand(zero_reg));
   5164   }
   5165 }
   5166 
   5167 
   5168 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5169   Register result = ToRegister(instr->result());
   5170 
   5171   // TODO(3095996): Get rid of this. For now, we need to make the
   5172   // result register contain a valid pointer because it is already
   5173   // contained in the register pointer map.
   5174   __ mov(result, zero_reg);
   5175 
   5176   PushSafepointRegistersScope scope(this);
   5177   if (instr->size()->IsRegister()) {
   5178     Register size = ToRegister(instr->size());
   5179     DCHECK(!size.is(result));
   5180     __ SmiTag(size);
   5181     __ push(size);
   5182   } else {
   5183     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5184     if (size >= 0 && size <= Smi::kMaxValue) {
   5185       __ Push(Smi::FromInt(size));
   5186     } else {
   5187       // We should never get here at runtime => abort
   5188       __ stop("invalid allocation size");
   5189       return;
   5190     }
   5191   }
   5192 
   5193   int flags = AllocateDoubleAlignFlag::encode(
   5194       instr->hydrogen()->MustAllocateDoubleAligned());
   5195   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5196     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5197     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   5198   } else {
   5199     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5200   }
   5201   __ Push(Smi::FromInt(flags));
   5202 
   5203   CallRuntimeFromDeferred(
   5204       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   5205   __ StoreToSafepointRegisterSlot(v0, result);
   5206 }
   5207 
   5208 
   5209 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   5210   DCHECK(ToRegister(instr->value()).is(a0));
   5211   DCHECK(ToRegister(instr->result()).is(v0));
   5212   __ push(a0);
   5213   CallRuntime(Runtime::kToFastProperties, 1, instr);
   5214 }
   5215 
   5216 
   5217 void LCodeGen::DoTypeof(LTypeof* instr) {
   5218   DCHECK(ToRegister(instr->value()).is(a3));
   5219   DCHECK(ToRegister(instr->result()).is(v0));
   5220   Label end, do_call;
   5221   Register value_register = ToRegister(instr->value());
   5222   __ JumpIfNotSmi(value_register, &do_call);
   5223   __ li(v0, Operand(isolate()->factory()->number_string()));
   5224   __ jmp(&end);
   5225   __ bind(&do_call);
   5226   TypeofStub stub(isolate());
   5227   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   5228   __ bind(&end);
   5229 }
   5230 
   5231 
   5232 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   5233   Register input = ToRegister(instr->value());
   5234 
   5235   Register cmp1 = no_reg;
   5236   Operand cmp2 = Operand(no_reg);
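  // EmitTypeofIs always fills in cmp1/cmp2; the DCHECKs below verify that
  // they were set to something valid.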
   5237 
   5238   Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
   5239                                                   instr->FalseLabel(chunk_),
   5240                                                   input,
   5241                                                   instr->type_literal(),
   5242                                                   &cmp1,
   5243                                                   &cmp2);
   5244 
   5245   DCHECK(cmp1.is_valid());
   5246   DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
   5247 
   5248   if (final_branch_condition != kNoCondition) {
   5249     EmitBranch(instr, final_branch_condition, cmp1, cmp2);
   5250   }
   5251 }
   5252 
   5253 
   5254 Condition LCodeGen::EmitTypeofIs(Label* true_label,
   5255                                  Label* false_label,
   5256                                  Register input,
   5257                                  Handle<String> type_name,
   5258                                  Register* cmp1,
   5259                                  Operand* cmp2) {
  // This function makes heavy use of the branch delay slot: it is used to
  // load values that are always safe to use, independent of the type of the
  // input register.
   5263   Condition final_branch_condition = kNoCondition;
   5264   Register scratch = scratch0();
   5265   Factory* factory = isolate()->factory();
   5266   if (String::Equals(type_name, factory->number_string())) {
   5267     __ JumpIfSmi(input, true_label);
   5268     __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
   5269     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   5270     *cmp1 = input;
   5271     *cmp2 = Operand(at);
   5272     final_branch_condition = eq;
   5273 
   5274   } else if (String::Equals(type_name, factory->string_string())) {
   5275     __ JumpIfSmi(input, false_label);
   5276     __ GetObjectType(input, input, scratch);
   5277     *cmp1 = scratch;
   5278     *cmp2 = Operand(FIRST_NONSTRING_TYPE);
   5279     final_branch_condition = lt;
   5280 
   5281   } else if (String::Equals(type_name, factory->symbol_string())) {
   5282     __ JumpIfSmi(input, false_label);
   5283     __ GetObjectType(input, input, scratch);
   5284     *cmp1 = scratch;
   5285     *cmp2 = Operand(SYMBOL_TYPE);
   5286     final_branch_condition = eq;
   5287 
   5288   } else if (String::Equals(type_name, factory->boolean_string())) {
   5289     __ LoadRoot(at, Heap::kTrueValueRootIndex);
   5290     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
   5291     __ LoadRoot(at, Heap::kFalseValueRootIndex);
   5292     *cmp1 = at;
   5293     *cmp2 = Operand(input);
   5294     final_branch_condition = eq;
   5295 
   5296   } else if (String::Equals(type_name, factory->undefined_string())) {
   5297     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   5298     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
   5299     // The first instruction of JumpIfSmi is an And - it is safe in the delay
   5300     // slot.
   5301     __ JumpIfSmi(input, false_label);
   5302     // Check for undetectable objects => true.
   5303     __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
   5304     __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
   5305     __ And(at, at, 1 << Map::kIsUndetectable);
   5306     *cmp1 = at;
   5307     *cmp2 = Operand(zero_reg);
   5308     final_branch_condition = ne;
   5309 
   5310   } else if (String::Equals(type_name, factory->function_string())) {
   5311     __ JumpIfSmi(input, false_label);
   5312     __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   5313     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
    *cmp1 = input;                                                   \
    *cmp2 = Operand(at);                                             \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
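      // Emit nops until enough patchable space is reserved.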
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
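  // Call the stack guard; CallRuntimeSaveDoubles preserves double registers
  // across the runtime call.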
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
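    // Fast path: skip the builtin call while sp is still above the limit.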
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
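  // Deoptimize if the receiver is a smi.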
  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));

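  // Deoptimize if the receiver is not a JS object; JS proxies also take the
  // deopt path here.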
  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
               Operand(JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
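  // Fall back to the runtime unless every map on the prototype chain up to
  // null has a usable enum cache.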
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  DCHECK(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
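  // An enum cache length of zero means there is nothing to enumerate; use
  // the empty fixed array rather than loading the cache.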
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

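  // The low bit of the index marks a field holding a mutable HeapNumber,
  // which must be boxed by the deferred runtime call; shifting it out leaves
  // the smi-tagged field index, negative for out-of-object properties.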
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ sra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

}  // namespace internal
}  // namespace v8