// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/mips64/lithium-codegen-mips64.h"

#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
    LCodeGen* codegen)
    : codegen_(codegen) {
  DCHECK(codegen_->info()->is_calling());
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

  StoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->push(ra);
  codegen_->masm_->CallStub(&stub);
}

LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
  RestoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->push(ra);
  codegen_->masm_->CallStub(&stub);
  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
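      // a0 and a1 were pushed below the newly reserved slots, so each zap
      // store is offset by 2 * kPointerSize to touch only the new slots.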
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->scope_info());
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <=
          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
        Callable callable = CodeFactory::FastNewFunctionContext(
            isolate(), info()->scope()->scope_type());
        __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
              Operand(slots));
        __ Call(callable.code(), RelocInfo::CODE_TARGET);
        // Result of the FastNewFunctionContext builtin is always in new space.
        need_write_barrier = false;
      } else {
        __ push(a1);
        __ Push(Smi::FromInt(info()->scope()->scope_type()));
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both v0 and cp. It replaces the context passed
    // to us. It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
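  // Calls may be patched with a lazy deoptimization call, so make sure the
  // generated code always has enough space for the patch sequence.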
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label table_start, call_deopt_entry;

    __ bind(&table_start);
    Label needs_frame;
    Address base = jump_table_[0]->address;
    for (int i = 0; i < jump_table_.length(); i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ bind(&table_entry->label);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      if (is_int16(entry - base)) {
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        } else {
          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        }

      } else {
        __ li(t9, Operand(entry - base));
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ PushCommonFrame();
          __ BranchAndLink(&needs_frame);
        } else {
          __ BranchAndLink(&call_deopt_entry);
        }
      }
    }
    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
      __ push(at);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    __ li(at,
          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
    __ Daddu(t9, t9, Operand(at));
    __ Jump(t9);
  }
  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
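      // mtc1 moves the raw 32-bit pattern into the FPU register; cvt_d_w then
      // converts that integer value to a double.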
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  // return ToRepresentation(op, Representation::Integer32());
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
                                            const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int64_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand((int64_t)0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand((int64_t)0);
}

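// Computes the sp-relative offset of an incoming argument when no eager
// frame has been built; parameter indices are negative, and index -1 maps
// to offset 0.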
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Retrieve the parameter relative to the stack pointer, since no eager
    // frame has been built.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
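  // The "high" word of a double stack slot is addressed at kIntSize (4 bytes)
  // past the slot's base, in both the framed and frameless cases below.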
  if (NeedsEagerFrame()) {
    // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
  } else {
    // Retrieve the parameter relative to the stack pointer, since no eager
    // frame has been built.
    // return MemOperand(
    //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ld(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

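  // With --deopt-every-n-times, decrement a per-isolate counter at each
  // potential deopt site and force a deopt when it reaches zero, after
  // resetting it to n.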
  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last()->label, condition, src1, src2);
  }
}

void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            DeoptimizeReason deopt_reason, Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
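  // In either sign case the mask is |divisor| - 1: for divisor < 0,
  // -(divisor + 1) == -divisor - 1 == |divisor| - 1.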
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

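  // Compute the remainder as dividend - (dividend / |divisor|) * |divisor|,
  // using the magic-number multiplication in TruncatingDiv. The remainder
  // takes the sign of the dividend.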
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
                   Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
                 Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
                 Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
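  // Round the quotient toward zero: for a negative dividend, add
  // |divisor| - 1 before the arithmetic shift. dsra32/dsrl32 shift by
  // (sa + 32), so the pairs below derive that bias from the sign bit.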
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      __ mfhi(remainder);
    } else {
      __ dmod(remainder, dividend, divisor);
    }
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
                 Operand(zero_reg));
  }

  __ Xor(scratch, scratch, result);
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result,
                   Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    __ mfhi(remainder);
  } else {
    __ dmod(remainder, dividend, divisor);
  }
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
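  // The Xor below is in the branch delay slot and executes on both paths;
  // its sign bit is set iff remainder and divisor have opposite signs.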
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulS(LMulS* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
                   Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
   1371           // If left is strictly negative and the constant is zero, the
   1372           // result is -0. Deoptimize if required, otherwise return 0.
   1373           DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
   1374                        Operand(zero_reg));
   1375         }
   1376         __ mov(result, zero_reg);
   1377         break;
   1378       case 1:
   1379         // Nothing to do.
   1380         __ Move(result, left);
   1381         break;
   1382       default:
   1383         // Multiplying by powers of two and powers of two plus or minus
   1384         // one can be done faster with shifted operands.
   1385         // For other constants we emit standard code.
   1386         int32_t mask = constant >> 31;
   1387         uint32_t constant_abs = (constant + mask) ^ mask;
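                // Branchless absolute value: mask is 0 when the constant is
                // non-negative and -1 (all ones) when it is negative, so
                // (constant + mask) ^ mask negates negative values.
                // E.g. for constant == -5: mask == -1 and (-5 + -1) ^ -1 == 5.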
   1388 
   1389         if (base::bits::IsPowerOfTwo32(constant_abs)) {
   1390           int32_t shift = WhichPowerOf2(constant_abs);
   1391           __ dsll(result, left, shift);
   1392           // Correct the sign of the result if the constant is negative.
   1393           if (constant < 0) __ Dsubu(result, zero_reg, result);
   1394         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
   1395           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1396           __ Dlsa(result, left, left, shift);
   1397           // Correct the sign of the result if the constant is negative.
   1398           if (constant < 0) __ Dsubu(result, zero_reg, result);
   1399         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
   1400           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1401           __ dsll(scratch, left, shift);
   1402           __ Dsubu(result, scratch, left);
   1403           // Correct the sign of the result if the constant is negative.
   1404           if (constant < 0) __ Dsubu(result, zero_reg, result);
   1405         } else {
   1406           // Generate standard code.
   1407           __ li(at, constant);
   1408           __ Dmul(result, left, at);
   1409         }
   1410     }
   1411   } else {
   1412     DCHECK(right_op->IsRegister());
   1413     Register right = ToRegister(right_op);
   1414 
   1415     if (overflow) {
   1416       // hi:lo = left * right.
   1417       __ Dmulh(result, left, right);
   1418       __ dsra32(scratch, result, 0);
   1419       __ sra(at, result, 31);
   1420       __ SmiTag(result);
   1421       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
   1422                    Operand(at));
   1423     } else {
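              // Only one operand needs untagging: with 32-bit smis stored in
              // the upper word, a * (b << 32) == (a * b) << 32, so multiplying
              // the untagged left by the still-tagged right yields a correctly
              // tagged smi product.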
   1424       __ SmiUntag(result, left);
   1425       __ dmul(result, result, right);
   1426     }
   1427 
   1428     if (bailout_on_minus_zero) {
   1429       Label done;
   1430       __ Xor(at, left, right);
   1431       __ Branch(&done, ge, at, Operand(zero_reg));
   1432       // Bail out if the result is minus zero.
   1433       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
   1434                    Operand(zero_reg));
   1435       __ bind(&done);
   1436     }
   1437   }
   1438 }
   1439 
   1440 
   1441 void LCodeGen::DoMulI(LMulI* instr) {
   1442   Register scratch = scratch0();
   1443   Register result = ToRegister(instr->result());
   1444   // Note that result may alias left.
   1445   Register left = ToRegister(instr->left());
   1446   LOperand* right_op = instr->right();
   1447 
   1448   bool bailout_on_minus_zero =
   1449       instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   1450   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1451 
   1452   if (right_op->IsConstantOperand()) {
   1453     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
   1454 
   1455     if (bailout_on_minus_zero && (constant < 0)) {
   1456       // The case of a zero constant is handled separately below.
   1457       // If the constant is negative and left is zero, the result should be -0.
   1458       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
   1459                    Operand(zero_reg));
   1460     }
   1461 
   1462     switch (constant) {
   1463       case -1:
   1464         if (overflow) {
   1465           Label no_overflow;
   1466           __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
   1467           DeoptimizeIf(al, instr);
   1468           __ bind(&no_overflow);
   1469         } else {
   1470           __ Subu(result, zero_reg, left);
   1471         }
   1472         break;
   1473       case 0:
   1474         if (bailout_on_minus_zero) {
   1475           // If left is strictly negative and the constant is zero, the
   1476           // result is -0. Deoptimize if required, otherwise return 0.
   1477           DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
   1478                        Operand(zero_reg));
   1479         }
   1480         __ mov(result, zero_reg);
   1481         break;
   1482       case 1:
   1483         // Nothing to do.
   1484         __ Move(result, left);
   1485         break;
   1486       default:
   1487         // Multiplying by powers of two and powers of two plus or minus
   1488         // one can be done faster with shifted operands.
   1489         // For other constants we emit standard code.
   1490         int32_t mask = constant >> 31;
   1491         uint32_t constant_abs = (constant + mask) ^ mask;
   1492 
   1493         if (base::bits::IsPowerOfTwo32(constant_abs)) {
   1494           int32_t shift = WhichPowerOf2(constant_abs);
   1495           __ sll(result, left, shift);
   1496           // Correct the sign of the result if the constant is negative.
   1497           if (constant < 0) __ Subu(result, zero_reg, result);
   1498         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
   1499           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1500           __ Lsa(result, left, left, shift);
   1501           // Correct the sign of the result if the constant is negative.
   1502           if (constant < 0) __ Subu(result, zero_reg, result);
   1503         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
   1504           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1505           __ sll(scratch, left, shift);
   1506           __ Subu(result, scratch, left);
   1507           // Correct the sign of the result if the constant is negative.
   1508           if (constant < 0) __ Subu(result, zero_reg, result);
   1509         } else {
   1510           // Generate standard code.
   1511           __ li(at, constant);
   1512           __ Mul(result, left, at);
   1513         }
   1514     }
   1515 
   1516   } else {
   1517     DCHECK(right_op->IsRegister());
   1518     Register right = ToRegister(right_op);
   1519 
   1520     if (overflow) {
   1521       // hi:lo = left * right.
   1522       __ Dmul(result, left, right);
   1523       __ dsra32(scratch, result, 0);
   1524       __ sra(at, result, 31);
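              // Dmul produced the full 64-bit product of the two 32-bit inputs.
              // The product fits in 32 bits iff bits 63..32 (extracted into
              // scratch by dsra32) equal the sign extension of bit 31 (computed
              // into at by sra); otherwise the multiplication overflowed.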
   1525 
   1526       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
   1527                    Operand(at));
   1528     } else {
   1529       __ mul(result, left, right);
   1530     }
   1531 
   1532     if (bailout_on_minus_zero) {
   1533       Label done;
   1534       __ Xor(at, left, right);
   1535       __ Branch(&done, ge, at, Operand(zero_reg));
   1536       // Bail out if the result is minus zero.
   1537       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
   1538                    Operand(zero_reg));
   1539       __ bind(&done);
   1540     }
   1541   }
   1542 }
   1543 
   1544 
   1545 void LCodeGen::DoBitI(LBitI* instr) {
   1546   LOperand* left_op = instr->left();
   1547   LOperand* right_op = instr->right();
   1548   DCHECK(left_op->IsRegister());
   1549   Register left = ToRegister(left_op);
   1550   Register result = ToRegister(instr->result());
   1551   Operand right(no_reg);
   1552 
   1553   if (right_op->IsStackSlot()) {
   1554     right = Operand(EmitLoadRegister(right_op, at));
   1555   } else {
   1556     DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
   1557     right = ToOperand(right_op);
   1558   }
   1559 
   1560   switch (instr->op()) {
   1561     case Token::BIT_AND:
   1562       __ And(result, left, right);
   1563       break;
   1564     case Token::BIT_OR:
   1565       __ Or(result, left, right);
   1566       break;
   1567     case Token::BIT_XOR:
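              // x ^ ~0 == ~x. MIPS has no bitwise-not instruction, but
              // nor(zero_reg, x) computes ~x in a single instruction.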
   1568       if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
   1569         __ Nor(result, zero_reg, left);
   1570       } else {
   1571         __ Xor(result, left, right);
   1572       }
   1573       break;
   1574     default:
   1575       UNREACHABLE();
   1576       break;
   1577   }
   1578 }
   1579 
   1580 
   1581 void LCodeGen::DoShiftI(LShiftI* instr) {
   1582   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
   1583   // result may alias either of them.
   1584   LOperand* right_op = instr->right();
   1585   Register left = ToRegister(instr->left());
   1586   Register result = ToRegister(instr->result());
   1587 
   1588   if (right_op->IsRegister()) {
   1589     // No need to mask the right operand on MIPS; masking is built into
   1590     // the variable shift instructions.
   1591     switch (instr->op()) {
   1592       case Token::ROR:
   1593         __ Ror(result, left, Operand(ToRegister(right_op)));
   1594         break;
   1595       case Token::SAR:
   1596         __ srav(result, left, ToRegister(right_op));
   1597         break;
   1598       case Token::SHR:
   1599         __ srlv(result, left, ToRegister(right_op));
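                // JavaScript's >>> yields an unsigned 32-bit value, but srlv
                // leaves a sign-extended 32-bit result in the 64-bit register.
                // A negative value here therefore means the logical result
                // exceeds kMaxInt and is not representable as an int32.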
   1600         if (instr->can_deopt()) {
   1601           // TODO(yy): (-1) >>> 0. Anything else?
   1602           DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
   1603                        Operand(zero_reg));
   1604           DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result,
   1605                        Operand(kMaxInt));
   1606         }
   1607         break;
   1608       case Token::SHL:
   1609         __ sllv(result, left, ToRegister(right_op));
   1610         break;
   1611       default:
   1612         UNREACHABLE();
   1613         break;
   1614     }
   1615   } else {
   1616     // Mask the right_op operand.
   1617     int value = ToInteger32(LConstantOperand::cast(right_op));
   1618     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
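            // Per the spec, JavaScript shift counts are taken modulo 32,
            // hence the & 0x1F.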
   1619     switch (instr->op()) {
   1620       case Token::ROR:
   1621         if (shift_count != 0) {
   1622           __ Ror(result, left, Operand(shift_count));
   1623         } else {
   1624           __ Move(result, left);
   1625         }
   1626         break;
   1627       case Token::SAR:
   1628         if (shift_count != 0) {
   1629           __ sra(result, left, shift_count);
   1630         } else {
   1631           __ Move(result, left);
   1632         }
   1633         break;
   1634       case Token::SHR:
   1635         if (shift_count != 0) {
   1636           __ srl(result, left, shift_count);
   1637         } else {
   1638           if (instr->can_deopt()) {
   1639             __ And(at, left, Operand(0x80000000));
   1640             DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
   1641                          Operand(zero_reg));
   1642           }
   1643           __ Move(result, left);
   1644         }
   1645         break;
   1646       case Token::SHL:
   1647         if (shift_count != 0) {
   1648           if (instr->hydrogen_value()->representation().IsSmi()) {
   1649             __ dsll(result, left, shift_count);
   1650           } else {
   1651             __ sll(result, left, shift_count);
   1652           }
   1653         } else {
   1654           __ Move(result, left);
   1655         }
   1656         break;
   1657       default:
   1658         UNREACHABLE();
   1659         break;
   1660     }
   1661   }
   1662 }
   1663 
   1664 
   1665 void LCodeGen::DoSubS(LSubS* instr) {
   1666   LOperand* left = instr->left();
   1667   LOperand* right = instr->right();
   1668   LOperand* result = instr->result();
   1669   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1670 
   1671   if (!can_overflow) {
   1672     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1673     __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
   1674   } else {  // can_overflow.
   1675     Register scratch = scratch0();
   1676     Label no_overflow_label;
   1677     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1678     __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
   1679                        &no_overflow_label, scratch);
   1680     DeoptimizeIf(al, instr);
   1681     __ bind(&no_overflow_label);
   1682   }
   1683 }
   1684 
   1685 
   1686 void LCodeGen::DoSubI(LSubI* instr) {
   1687   LOperand* left = instr->left();
   1688   LOperand* right = instr->right();
   1689   LOperand* result = instr->result();
   1690   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1691 
   1692   if (!can_overflow) {
   1693     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1694     __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
   1695   } else {  // can_overflow.
   1696     Register scratch = scratch0();
   1697     Label no_overflow_label;
   1698     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1699     __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
   1700                       &no_overflow_label, scratch);
   1701     DeoptimizeIf(al, instr);
   1702     __ bind(&no_overflow_label);
   1703   }
   1704 }
   1705 
   1706 
   1707 void LCodeGen::DoConstantI(LConstantI* instr) {
   1708   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1709 }
   1710 
   1711 
   1712 void LCodeGen::DoConstantS(LConstantS* instr) {
   1713   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1714 }
   1715 
   1716 
   1717 void LCodeGen::DoConstantD(LConstantD* instr) {
   1718   DCHECK(instr->result()->IsDoubleRegister());
   1719   DoubleRegister result = ToDoubleRegister(instr->result());
   1720   double v = instr->value();
   1721   __ Move(result, v);
   1722 }
   1723 
   1724 
   1725 void LCodeGen::DoConstantE(LConstantE* instr) {
   1726   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1727 }
   1728 
   1729 
   1730 void LCodeGen::DoConstantT(LConstantT* instr) {
   1731   Handle<Object> object = instr->value(isolate());
   1732   AllowDeferredHandleDereference smi_check;
   1733   __ li(ToRegister(instr->result()), object);
   1734 }
   1735 
   1736 
   1737 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   1738                                            LOperand* index,
   1739                                            String::Encoding encoding) {
   1740   if (index->IsConstantOperand()) {
   1741     int offset = ToInteger32(LConstantOperand::cast(index));
   1742     if (encoding == String::TWO_BYTE_ENCODING) {
   1743       offset *= kUC16Size;
   1744     }
   1745     STATIC_ASSERT(kCharSize == 1);
   1746     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   1747   }
   1748   Register scratch = scratch0();
   1749   DCHECK(!scratch.is(string));
   1750   DCHECK(!scratch.is(ToRegister(index)));
   1751   if (encoding == String::ONE_BYTE_ENCODING) {
   1752     __ Daddu(scratch, string, ToRegister(index));
   1753   } else {
   1754     STATIC_ASSERT(kUC16Size == 2);
   1755     __ dsll(scratch, ToRegister(index), 1);
   1756     __ Daddu(scratch, string, scratch);
   1757   }
   1758   return FieldMemOperand(scratch, SeqString::kHeaderSize);
   1759 }
   1760 
   1761 
   1762 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1763   String::Encoding encoding = instr->hydrogen()->encoding();
   1764   Register string = ToRegister(instr->string());
   1765   Register result = ToRegister(instr->result());
   1766 
   1767   if (FLAG_debug_code) {
   1768     Register scratch = scratch0();
   1769     __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   1770     __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1771 
   1772     __ And(scratch, scratch,
   1773            Operand(kStringRepresentationMask | kStringEncodingMask));
   1774     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1775     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1776     __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
   1777                                 ? one_byte_seq_type : two_byte_seq_type));
   1778     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
   1779   }
   1780 
   1781   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1782   if (encoding == String::ONE_BYTE_ENCODING) {
   1783     __ lbu(result, operand);
   1784   } else {
   1785     __ lhu(result, operand);
   1786   }
   1787 }
   1788 
   1789 
   1790 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   1791   String::Encoding encoding = instr->hydrogen()->encoding();
   1792   Register string = ToRegister(instr->string());
   1793   Register value = ToRegister(instr->value());
   1794 
   1795   if (FLAG_debug_code) {
   1796     Register scratch = scratch0();
   1797     Register index = ToRegister(instr->index());
   1798     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1799     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1800     int encoding_mask =
   1801         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   1802         ? one_byte_seq_type : two_byte_seq_type;
   1803     __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
   1804   }
   1805 
   1806   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1807   if (encoding == String::ONE_BYTE_ENCODING) {
   1808     __ sb(value, operand);
   1809   } else {
   1810     __ sh(value, operand);
   1811   }
   1812 }
   1813 
   1814 
   1815 void LCodeGen::DoAddE(LAddE* instr) {
   1816   LOperand* result = instr->result();
   1817   LOperand* left = instr->left();
   1818   LOperand* right = instr->right();
   1819 
   1820   DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
   1821   DCHECK(right->IsRegister() || right->IsConstantOperand());
   1822   __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
   1823 }
   1824 
   1825 
   1826 void LCodeGen::DoAddS(LAddS* instr) {
   1827   LOperand* left = instr->left();
   1828   LOperand* right = instr->right();
   1829   LOperand* result = instr->result();
   1830   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1831 
   1832   if (!can_overflow) {
   1833     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1834     __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
   1835   } else {  // can_overflow.
   1836     Label no_overflow_label;
   1837     Register scratch = scratch1();
   1838     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1839     __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
   1840                        &no_overflow_label, scratch);
   1841     DeoptimizeIf(al, instr);
   1842     __ bind(&no_overflow_label);
   1843   }
   1844 }
   1845 
   1846 
   1847 void LCodeGen::DoAddI(LAddI* instr) {
   1848   LOperand* left = instr->left();
   1849   LOperand* right = instr->right();
   1850   LOperand* result = instr->result();
   1851   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1852 
   1853   if (!can_overflow) {
   1854     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1855     __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
   1856   } else {  // can_overflow.
   1857     Label no_overflow_label;
   1858     Register scratch = scratch1();
   1859     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1860     __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
   1861                       &no_overflow_label, scratch);
   1862     DeoptimizeIf(al, instr);
   1863     __ bind(&no_overflow_label);
   1864   }
   1865 }
   1866 
   1867 
   1868 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   1869   LOperand* left = instr->left();
   1870   LOperand* right = instr->right();
   1871   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   1872   Register scratch = scratch1();
   1873   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   1874     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
   1875     Register left_reg = ToRegister(left);
   1876     Register right_reg = EmitLoadRegister(right, scratch0());
   1877     Register result_reg = ToRegister(instr->result());
   1879     __ Slt(scratch, left_reg, Operand(right_reg));
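            // Branchless select via conditional moves: Slt sets scratch to 1
            // iff left < right. Movz copies when scratch == 0 and Movn copies
            // when scratch != 0, so the pair below picks min or max without a
            // branch.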
   1880     if (condition == ge) {
   1881       __ Movz(result_reg, left_reg, scratch);
   1882       __ Movn(result_reg, right_reg, scratch);
   1883     } else {
   1884       DCHECK(condition == le);
   1885       __ Movn(result_reg, left_reg, scratch);
   1886       __ Movz(result_reg, right_reg, scratch);
   1887     }
   1888   } else {
   1889     DCHECK(instr->hydrogen()->representation().IsDouble());
   1890     FPURegister left_reg = ToDoubleRegister(left);
   1891     FPURegister right_reg = ToDoubleRegister(right);
   1892     FPURegister result_reg = ToDoubleRegister(instr->result());
   1893     Label nan, done;
   1894     if (operation == HMathMinMax::kMathMax) {
   1895       __ Float64Max(result_reg, left_reg, right_reg, &nan);
   1896     } else {
   1897       DCHECK(operation == HMathMinMax::kMathMin);
   1898       __ Float64Min(result_reg, left_reg, right_reg, &nan);
   1899     }
   1900     __ Branch(&done);
   1901 
   1902     __ bind(&nan);
   1903     __ add_d(result_reg, left_reg, right_reg);
   1904 
   1905     __ bind(&done);
   1906   }
   1907 }
   1908 
   1909 
   1910 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1911   DoubleRegister left = ToDoubleRegister(instr->left());
   1912   DoubleRegister right = ToDoubleRegister(instr->right());
   1913   DoubleRegister result = ToDoubleRegister(instr->result());
   1914   switch (instr->op()) {
   1915     case Token::ADD:
   1916       __ add_d(result, left, right);
   1917       break;
   1918     case Token::SUB:
   1919       __ sub_d(result, left, right);
   1920       break;
   1921     case Token::MUL:
   1922       __ mul_d(result, left, right);
   1923       break;
   1924     case Token::DIV:
   1925       __ div_d(result, left, right);
   1926       break;
   1927     case Token::MOD: {
   1928       // Save a0-a3 on the stack.
   1929       RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
   1930       __ MultiPush(saved_regs);
   1931 
   1932       __ PrepareCallCFunction(0, 2, scratch0());
   1933       __ MovToFloatParameters(left, right);
   1934       __ CallCFunction(
   1935           ExternalReference::mod_two_doubles_operation(isolate()),
   1936           0, 2);
   1937       // Move the result into the double result register.
   1938       __ MovFromFloatResult(result);
   1939 
   1940       // Restore the saved registers.
   1941       __ MultiPop(saved_regs);
   1942       break;
   1943     }
   1944     default:
   1945       UNREACHABLE();
   1946       break;
   1947   }
   1948 }
   1949 
   1950 
   1951 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   1952   DCHECK(ToRegister(instr->context()).is(cp));
   1953   DCHECK(ToRegister(instr->left()).is(a1));
   1954   DCHECK(ToRegister(instr->right()).is(a0));
   1955   DCHECK(ToRegister(instr->result()).is(v0));
   1956 
   1957   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   1958   CallCode(code, RelocInfo::CODE_TARGET, instr);
   1959   // Other architectures use a nop here to signal that there is no inlined
   1960   // patchable code. MIPS does not need the nop, since our marker
   1961   // instruction (andi zero_reg) will never appear in normal code.
   1962 }
   1963 
   1964 
   1965 template<class InstrType>
   1966 void LCodeGen::EmitBranch(InstrType instr,
   1967                           Condition condition,
   1968                           Register src1,
   1969                           const Operand& src2) {
   1970   int left_block = instr->TrueDestination(chunk_);
   1971   int right_block = instr->FalseDestination(chunk_);
   1972 
   1973   int next_block = GetNextEmittedBlock();
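          // Emit as few branches as possible: when one destination is the next
          // block to be emitted, branch only on the opposite outcome and fall
          // through; otherwise a conditional branch plus an unconditional one
          // is required.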
   1974   if (right_block == left_block || condition == al) {
   1975     EmitGoto(left_block);
   1976   } else if (left_block == next_block) {
   1977     __ Branch(chunk_->GetAssemblyLabel(right_block),
   1978               NegateCondition(condition), src1, src2);
   1979   } else if (right_block == next_block) {
   1980     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
   1981   } else {
   1982     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
   1983     __ Branch(chunk_->GetAssemblyLabel(right_block));
   1984   }
   1985 }
   1986 
   1987 
   1988 template<class InstrType>
   1989 void LCodeGen::EmitBranchF(InstrType instr,
   1990                            Condition condition,
   1991                            FPURegister src1,
   1992                            FPURegister src2) {
   1993   int right_block = instr->FalseDestination(chunk_);
   1994   int left_block = instr->TrueDestination(chunk_);
   1995 
   1996   int next_block = GetNextEmittedBlock();
   1997   if (right_block == left_block) {
   1998     EmitGoto(left_block);
   1999   } else if (left_block == next_block) {
   2000     __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
   2001                NegateFpuCondition(condition), src1, src2);
   2002   } else if (right_block == next_block) {
   2003     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
   2004                condition, src1, src2);
   2005   } else {
   2006     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
   2007                condition, src1, src2);
   2008     __ Branch(chunk_->GetAssemblyLabel(right_block));
   2009   }
   2010 }
   2011 
   2012 
   2013 template <class InstrType>
   2014 void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
   2015                               Register src1, const Operand& src2) {
   2016   int true_block = instr->TrueDestination(chunk_);
   2017   __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
   2018 }
   2019 
   2020 
   2021 template <class InstrType>
   2022 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
   2023                                Register src1, const Operand& src2) {
   2024   int false_block = instr->FalseDestination(chunk_);
   2025   __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
   2026 }
   2027 
   2028 
   2029 template<class InstrType>
   2030 void LCodeGen::EmitFalseBranchF(InstrType instr,
   2031                                 Condition condition,
   2032                                 FPURegister src1,
   2033                                 FPURegister src2) {
   2034   int false_block = instr->FalseDestination(chunk_);
   2035   __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
   2036              condition, src1, src2);
   2037 }
   2038 
   2039 
   2040 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   2041   __ stop("LDebugBreak");
   2042 }
   2043 
   2044 
   2045 void LCodeGen::DoBranch(LBranch* instr) {
   2046   Representation r = instr->hydrogen()->value()->representation();
   2047   if (r.IsInteger32() || r.IsSmi()) {
   2048     DCHECK(!info()->IsStub());
   2049     Register reg = ToRegister(instr->value());
   2050     EmitBranch(instr, ne, reg, Operand(zero_reg));
   2051   } else if (r.IsDouble()) {
   2052     DCHECK(!info()->IsStub());
   2053     DoubleRegister reg = ToDoubleRegister(instr->value());
   2054     // Test the double value. Zero and NaN are false.
   2055     EmitBranchF(instr, ogl, reg, kDoubleRegZero);
   2056   } else {
   2057     DCHECK(r.IsTagged());
   2058     Register reg = ToRegister(instr->value());
   2059     HType type = instr->hydrogen()->value()->type();
   2060     if (type.IsBoolean()) {
   2061       DCHECK(!info()->IsStub());
   2062       __ LoadRoot(at, Heap::kTrueValueRootIndex);
   2063       EmitBranch(instr, eq, reg, Operand(at));
   2064     } else if (type.IsSmi()) {
   2065       DCHECK(!info()->IsStub());
   2066       EmitBranch(instr, ne, reg, Operand(zero_reg));
   2067     } else if (type.IsJSArray()) {
   2068       DCHECK(!info()->IsStub());
   2069       EmitBranch(instr, al, zero_reg, Operand(zero_reg));
   2070     } else if (type.IsHeapNumber()) {
   2071       DCHECK(!info()->IsStub());
   2072       DoubleRegister dbl_scratch = double_scratch0();
   2073       __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2074       // Test the double value. Zero and NaN are false.
   2075       EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
   2076     } else if (type.IsString()) {
   2077       DCHECK(!info()->IsStub());
   2078       __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
   2079       EmitBranch(instr, ne, at, Operand(zero_reg));
   2080     } else {
   2081       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
   2082       // Avoid deopts in the case where we've never executed this path before.
   2083       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
   2084 
   2085       if (expected & ToBooleanHint::kUndefined) {
   2086         // undefined -> false.
   2087         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   2088         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2089       }
   2090       if (expected & ToBooleanHint::kBoolean) {
   2091         // Boolean -> its value.
   2092         __ LoadRoot(at, Heap::kTrueValueRootIndex);
   2093         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
   2094         __ LoadRoot(at, Heap::kFalseValueRootIndex);
   2095         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2096       }
   2097       if (expected & ToBooleanHint::kNull) {
   2098         // 'null' -> false.
   2099         __ LoadRoot(at, Heap::kNullValueRootIndex);
   2100         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2101       }
   2102 
   2103       if (expected & ToBooleanHint::kSmallInteger) {
   2104         // Smis: 0 -> false, all other -> true.
   2105         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
   2106         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2107       } else if (expected & ToBooleanHint::kNeedsMap) {
   2108         // If we need a map later and have a Smi -> deopt.
   2109         __ SmiTst(reg, at);
   2110         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
   2111       }
   2112 
   2113       const Register map = scratch0();
   2114       if (expected & ToBooleanHint::kNeedsMap) {
   2115         __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
   2116         if (expected & ToBooleanHint::kCanBeUndetectable) {
   2117           // Undetectable -> false.
   2118           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
   2119           __ And(at, at, Operand(1 << Map::kIsUndetectable));
   2120           __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
   2121         }
   2122       }
   2123 
   2124       if (expected & ToBooleanHint::kReceiver) {
   2125         // spec object -> true.
   2126         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2127         __ Branch(instr->TrueLabel(chunk_),
   2128                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
   2129       }
   2130 
   2131       if (expected & ToBooleanHint::kString) {
   2132         // String value -> false iff empty.
   2133         Label not_string;
   2134         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2135         __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
   2136         __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
   2137         __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
   2138         __ Branch(instr->FalseLabel(chunk_));
   2139         __ bind(&not_string);
   2140       }
   2141 
   2142       if (expected & ToBooleanHint::kSymbol) {
   2143         // Symbol value -> true.
   2144         const Register scratch = scratch1();
   2145         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2146         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
   2147       }
   2148 
   2149       if (expected & ToBooleanHint::kHeapNumber) {
   2150         // heap number -> false iff +0, -0, or NaN.
   2151         DoubleRegister dbl_scratch = double_scratch0();
   2152         Label not_heap_number;
   2153         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   2154         __ Branch(&not_heap_number, ne, map, Operand(at));
   2155         __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2156         __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2157                    ne, dbl_scratch, kDoubleRegZero);
   2158         // Falls through if dbl_scratch == 0.
   2159         __ Branch(instr->FalseLabel(chunk_));
   2160         __ bind(&not_heap_number);
   2161       }
   2162 
   2163       if (expected != ToBooleanHint::kAny) {
   2164         // We've seen something for the first time -> deopt.
   2165         // This can only happen if we are not generic already.
   2166         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
   2167                      Operand(zero_reg));
   2168       }
   2169     }
   2170   }
   2171 }
   2172 
   2173 
   2174 void LCodeGen::EmitGoto(int block) {
   2175   if (!IsNextEmittedBlock(block)) {
   2176     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2177   }
   2178 }
   2179 
   2180 
   2181 void LCodeGen::DoGoto(LGoto* instr) {
   2182   EmitGoto(instr->block_id());
   2183 }
   2184 
   2185 
   2186 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2187   Condition cond = kNoCondition;
   2188   switch (op) {
   2189     case Token::EQ:
   2190     case Token::EQ_STRICT:
   2191       cond = eq;
   2192       break;
   2193     case Token::NE:
   2194     case Token::NE_STRICT:
   2195       cond = ne;
   2196       break;
   2197     case Token::LT:
   2198       cond = is_unsigned ? lo : lt;
   2199       break;
   2200     case Token::GT:
   2201       cond = is_unsigned ? hi : gt;
   2202       break;
   2203     case Token::LTE:
   2204       cond = is_unsigned ? ls : le;
   2205       break;
   2206     case Token::GTE:
   2207       cond = is_unsigned ? hs : ge;
   2208       break;
   2209     case Token::IN:
   2210     case Token::INSTANCEOF:
   2211     default:
   2212       UNREACHABLE();
   2213   }
   2214   return cond;
   2215 }
   2216 
   2217 
   2218 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2219   LOperand* left = instr->left();
   2220   LOperand* right = instr->right();
   2221   bool is_unsigned =
   2222       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2223       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2224   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2225 
   2226   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2227     // We can statically evaluate the comparison.
   2228     double left_val = ToDouble(LConstantOperand::cast(left));
   2229     double right_val = ToDouble(LConstantOperand::cast(right));
   2230     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
   2231                          ? instr->TrueDestination(chunk_)
   2232                          : instr->FalseDestination(chunk_);
   2233     EmitGoto(next_block);
   2234   } else {
   2235     if (instr->is_double()) {
   2236       // Compare left and right as doubles and load the
   2237       // resulting flags into the normal status register.
   2238       FPURegister left_reg = ToDoubleRegister(left);
   2239       FPURegister right_reg = ToDoubleRegister(right);
   2240 
   2241       // If a NaN is involved, i.e. the result is unordered,
   2242       // jump to false block label.
   2243       __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
   2244                  left_reg, right_reg);
   2245 
   2246       EmitBranchF(instr, cond, left_reg, right_reg);
   2247     } else {
   2248       Register cmp_left;
   2249       Operand cmp_right = Operand(static_cast<int64_t>(0));
   2250       if (right->IsConstantOperand()) {
   2251         int32_t value = ToInteger32(LConstantOperand::cast(right));
   2252         if (instr->hydrogen_value()->representation().IsSmi()) {
   2253           cmp_left = ToRegister(left);
   2254           cmp_right = Operand(Smi::FromInt(value));
   2255         } else {
   2256           cmp_left = ToRegister(left);
   2257           cmp_right = Operand(value);
   2258         }
   2259       } else if (left->IsConstantOperand()) {
   2260         int32_t value = ToInteger32(LConstantOperand::cast(left));
   2261         if (instr->hydrogen_value()->representation().IsSmi()) {
   2262           cmp_left = ToRegister(right);
   2263           cmp_right = Operand(Smi::FromInt(value));
   2264         } else {
   2265           cmp_left = ToRegister(right);
   2266           cmp_right = Operand(value);
   2267         }
   2268         // We commuted the operands, so commute the condition.
   2269         cond = CommuteCondition(cond);
   2270       } else {
   2271         cmp_left = ToRegister(left);
   2272         cmp_right = Operand(ToRegister(right));
   2273       }
   2274 
   2275       EmitBranch(instr, cond, cmp_left, cmp_right);
   2276     }
   2277   }
   2278 }
   2279 
   2280 
   2281 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2282   Register left = ToRegister(instr->left());
   2283   Register right = ToRegister(instr->right());
   2284 
   2285   EmitBranch(instr, eq, left, Operand(right));
   2286 }
   2287 
   2288 
   2289 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2290   if (instr->hydrogen()->representation().IsTagged()) {
   2291     Register input_reg = ToRegister(instr->object());
   2292     __ li(at, Operand(factory()->the_hole_value()));
   2293     EmitBranch(instr, eq, input_reg, Operand(at));
   2294     return;
   2295   }
   2296 
   2297   DoubleRegister input_reg = ToDoubleRegister(instr->object());
   2298   EmitFalseBranchF(instr, eq, input_reg, input_reg);
   2299 
   2300   Register scratch = scratch0();
   2301   __ FmoveHigh(scratch, input_reg);
   2302   EmitBranch(instr, eq, scratch,
   2303              Operand(static_cast<int32_t>(kHoleNanUpper32)));
   2304 }
   2305 
   2306 
   2307 Condition LCodeGen::EmitIsString(Register input,
   2308                                  Register temp1,
   2309                                  Label* is_not_string,
   2310                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2311   if (check_needed == INLINE_SMI_CHECK) {
   2312     __ JumpIfSmi(input, is_not_string);
   2313   }
   2314   __ GetObjectType(input, temp1, temp1);
   2315 
   2316   return lt;
   2317 }
   2318 
   2319 
   2320 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2321   Register reg = ToRegister(instr->value());
   2322   Register temp1 = ToRegister(instr->temp());
   2323 
   2324   SmiCheck check_needed =
   2325       instr->hydrogen()->value()->type().IsHeapObject()
   2326           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2327   Condition true_cond =
   2328       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
   2329 
   2330   EmitBranch(instr, true_cond, temp1,
   2331              Operand(FIRST_NONSTRING_TYPE));
   2332 }
   2333 
   2334 
   2335 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2336   Register input_reg = EmitLoadRegister(instr->value(), at);
   2337   __ And(at, input_reg, kSmiTagMask);
   2338   EmitBranch(instr, eq, at, Operand(zero_reg));
   2339 }
   2340 
   2341 
   2342 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2343   Register input = ToRegister(instr->value());
   2344   Register temp = ToRegister(instr->temp());
   2345 
   2346   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2347     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2348   }
   2349   __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2350   __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2351   __ And(at, temp, Operand(1 << Map::kIsUndetectable));
   2352   EmitBranch(instr, ne, at, Operand(zero_reg));
   2353 }
   2354 
   2355 
   2356 static Condition ComputeCompareCondition(Token::Value op) {
   2357   switch (op) {
   2358     case Token::EQ_STRICT:
   2359     case Token::EQ:
   2360       return eq;
   2361     case Token::LT:
   2362       return lt;
   2363     case Token::GT:
   2364       return gt;
   2365     case Token::LTE:
   2366       return le;
   2367     case Token::GTE:
   2368       return ge;
   2369     default:
   2370       UNREACHABLE();
   2371       return kNoCondition;
   2372   }
   2373 }
   2374 
   2375 
   2376 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2377   DCHECK(ToRegister(instr->context()).is(cp));
   2378   DCHECK(ToRegister(instr->left()).is(a1));
   2379   DCHECK(ToRegister(instr->right()).is(a0));
   2380 
   2381   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   2382   CallCode(code, RelocInfo::CODE_TARGET, instr);
   2383   __ LoadRoot(at, Heap::kTrueValueRootIndex);
   2384   EmitBranch(instr, eq, v0, Operand(at));
   2385 }
   2386 
   2387 
   2388 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2389   InstanceType from = instr->from();
   2390   InstanceType to = instr->to();
   2391   if (from == FIRST_TYPE) return to;
   2392   DCHECK(from == to || to == LAST_TYPE);
   2393   return from;
   2394 }
   2395 
   2396 
   2397 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2398   InstanceType from = instr->from();
   2399   InstanceType to = instr->to();
   2400   if (from == to) return eq;
   2401   if (to == LAST_TYPE) return hs;
   2402   if (from == FIRST_TYPE) return ls;
   2403   UNREACHABLE();
   2404   return eq;
   2405 }
   2406 
   2407 
   2408 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2409   Register scratch = scratch0();
   2410   Register input = ToRegister(instr->value());
   2411 
   2412   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2413     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2414   }
   2415 
   2416   __ GetObjectType(input, scratch, scratch);
   2417   EmitBranch(instr,
   2418              BranchCondition(instr->hydrogen()),
   2419              scratch,
   2420              Operand(TestType(instr->hydrogen())));
   2421 }
   2422 
   2423 // Branches to a label or falls through with the answer in flags.  Trashes
   2424 // the temp registers, but not the input.
   2425 void LCodeGen::EmitClassOfTest(Label* is_true,
   2426                                Label* is_false,
   2427                                Handle<String> class_name,
   2428                                Register input,
   2429                                Register temp,
   2430                                Register temp2) {
   2431   DCHECK(!input.is(temp));
   2432   DCHECK(!input.is(temp2));
   2433   DCHECK(!temp.is(temp2));
   2434 
   2435   __ JumpIfSmi(input, is_false);
   2436 
   2437   __ GetObjectType(input, temp, temp2);
   2438   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   2439   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2440     __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
   2441   } else {
   2442     __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
   2443   }
   2444 
   2445   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   2446   // Check if the constructor in the map is a function.
   2447   Register instance_type = scratch1();
   2448   DCHECK(!instance_type.is(temp));
   2449   __ GetMapConstructor(temp, temp, temp2, instance_type);
   2450 
   2451   // Objects with a non-function constructor have class 'Object'.
   2452   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
   2453     __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
   2454   } else {
   2455     __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
   2456   }
   2457 
   2458   // temp now contains the constructor function. Grab the
   2459   // instance class name from there.
   2460   __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2461   __ ld(temp, FieldMemOperand(temp,
   2462                                SharedFunctionInfo::kInstanceClassNameOffset));
   2463   // The class name we are testing against is internalized since it's a literal.
   2464   // The name in the constructor is internalized because of the way the context
   2465   // is booted.  This routine isn't expected to work for random API-created
   2466   // classes and it doesn't have to because you can't access it with natives
   2467   // syntax.  Since both sides are internalized it is sufficient to use an
   2468   // identity comparison.
   2469 
   2470   // End with the address of this class_name instance in temp register.
   2471   // On MIPS, the caller must do the comparison with Handle<String> class_name.
   2472 }
   2473 
   2474 
   2475 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2476   Register input = ToRegister(instr->value());
   2477   Register temp = scratch0();
   2478   Register temp2 = ToRegister(instr->temp());
   2479   Handle<String> class_name = instr->hydrogen()->class_name();
   2480 
   2481   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2482                   class_name, input, temp, temp2);
   2483 
   2484   EmitBranch(instr, eq, temp, Operand(class_name));
   2485 }
   2486 
   2487 
   2488 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2489   Register reg = ToRegister(instr->value());
   2490   Register temp = ToRegister(instr->temp());
   2491 
   2492   __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   2493   EmitBranch(instr, eq, temp, Operand(instr->map()));
   2494 }
   2495 
   2496 
   2497 void LCodeGen::DoHasInPrototypeChainAndBranch(
   2498     LHasInPrototypeChainAndBranch* instr) {
   2499   Register const object = ToRegister(instr->object());
   2500   Register const object_map = scratch0();
   2501   Register const object_instance_type = scratch1();
   2502   Register const object_prototype = object_map;
   2503   Register const prototype = ToRegister(instr->prototype());
   2504 
   2505   // The {object} must be a spec object.  It's sufficient to know that {object}
   2506   // is not a smi, since all other non-spec objects have {null} prototypes and
   2507   // will be ruled out below.
   2508   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
   2509     __ SmiTst(object, at);
   2510     EmitFalseBranch(instr, eq, at, Operand(zero_reg));
   2511   }
   2512 
   2513   // Loop through the {object}s prototype chain looking for the {prototype}.
   2514   __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   2515   Label loop;
   2516   __ bind(&loop);
   2517 
   2518   // Deoptimize if the object needs to be access checked.
   2519   __ lbu(object_instance_type,
   2520          FieldMemOperand(object_map, Map::kBitFieldOffset));
   2521   __ And(object_instance_type, object_instance_type,
   2522          Operand(1 << Map::kIsAccessCheckNeeded));
   2523   DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
   2524                Operand(zero_reg));
   2525   __ lbu(object_instance_type,
   2526          FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   2527   DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
   2528                Operand(JS_PROXY_TYPE));
   2529 
   2530   __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
   2531   __ LoadRoot(at, Heap::kNullValueRootIndex);
   2532   EmitFalseBranch(instr, eq, object_prototype, Operand(at));
   2533   EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
   2534   __ Branch(&loop, USE_DELAY_SLOT);
   2535   __ ld(object_map, FieldMemOperand(object_prototype,
   2536                                     HeapObject::kMapOffset));  // In delay slot.
   2537 }
   2538 
   2539 
   2540 void LCodeGen::DoCmpT(LCmpT* instr) {
   2541   DCHECK(ToRegister(instr->context()).is(cp));
   2542   Token::Value op = instr->op();
   2543 
   2544   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   2545   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2546   // On MIPS there is no need for a "no inlined smi code" marker (nop).
   2547 
   2548   Condition condition = ComputeCompareCondition(op);
   2549   // A minor optimization that relies on LoadRoot always emitting one
   2550   // instruction.
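          // The taken branch executes the first LoadRoot (the true value) in
          // its delay slot and skips the second; when the branch is not taken,
          // both LoadRoots execute and the second (the false value) wins. This
          // only works because LoadRoot emits exactly one instruction, which
          // the DCHECK below verifies.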
   2551   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
   2552   Label done, check;
   2553   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
   2554   __ bind(&check);
   2555   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   2556   DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
   2557   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   2558   __ bind(&done);
   2559 }
   2560 
   2561 
   2562 void LCodeGen::DoReturn(LReturn* instr) {
   2563   if (FLAG_trace && info()->IsOptimizing()) {
   2564     // Push the return value on the stack as the parameter.
   2565     // Runtime::TraceExit returns its parameter in v0. Since we are leaving
   2566     // the code managed by the register allocator and tearing down the frame,
   2567     // it is safe to write to the context register.
   2568     __ push(v0);
   2569     __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2570     __ CallRuntime(Runtime::kTraceExit);
   2571   }
   2572   if (info()->saves_caller_doubles()) {
   2573     RestoreCallerDoubles();
   2574   }
   2575   if (NeedsEagerFrame()) {
   2576     __ mov(sp, fp);
   2577     __ Pop(ra, fp);
   2578   }
   2579   if (instr->has_constant_parameter_count()) {
   2580     int parameter_count = ToInteger32(instr->constant_parameter_count());
   2581     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
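            // The +1 accounts for the receiver, which is dropped along with
            // the parameters.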
   2582     if (sp_delta != 0) {
   2583       __ Daddu(sp, sp, Operand(sp_delta));
   2584     }
   2585   } else {
   2586     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   2587     Register reg = ToRegister(instr->parameter_count());
   2588     // The argument count parameter is a smi.
   2589     __ SmiUntag(reg);
   2590     __ Dlsa(sp, sp, reg, kPointerSizeLog2);
   2591   }
   2592 
   2593   __ Jump(ra);
   2594 }
   2595 
   2596 
   2597 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2598   Register context = ToRegister(instr->context());
   2599   Register result = ToRegister(instr->result());
   2600 
   2601   __ ld(result, ContextMemOperand(context, instr->slot_index()));
   2602   if (instr->hydrogen()->RequiresHoleCheck()) {
   2603     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2604 
   2605     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2606       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
   2607     } else {
   2608       Label is_not_hole;
   2609       __ Branch(&is_not_hole, ne, result, Operand(at));
   2610       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2611       __ bind(&is_not_hole);
   2612     }
   2613   }
   2614 }
   2615 
   2616 
   2617 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2618   Register context = ToRegister(instr->context());
   2619   Register value = ToRegister(instr->value());
   2620   Register scratch = scratch0();
   2621   MemOperand target = ContextMemOperand(context, instr->slot_index());
   2622 
   2623   Label skip_assignment;
   2624 
   2625   if (instr->hydrogen()->RequiresHoleCheck()) {
   2626     __ ld(scratch, target);
   2627     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2628 
   2629     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2630       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
   2631     } else {
   2632       __ Branch(&skip_assignment, ne, scratch, Operand(at));
   2633     }
   2634   }
   2635 
   2636   __ sd(value, target);
   2637   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2638     SmiCheck check_needed =
   2639         instr->hydrogen()->value()->type().IsHeapObject()
   2640             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2641     __ RecordWriteContextSlot(context,
   2642                               target.offset(),
   2643                               value,
   2644                               scratch0(),
   2645                               GetRAState(),
   2646                               kSaveFPRegs,
   2647                               EMIT_REMEMBERED_SET,
   2648                               check_needed);
   2649   }
   2650 
   2651   __ bind(&skip_assignment);
   2652 }
   2653 
   2654 
   2655 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2656   HObjectAccess access = instr->hydrogen()->access();
   2657   int offset = access.offset();
   2658   Register object = ToRegister(instr->object());
   2659   if (access.IsExternalMemory()) {
   2660     Register result = ToRegister(instr->result());
   2661     MemOperand operand = MemOperand(object, offset);
   2662     __ Load(result, operand, access.representation());
   2663     return;
   2664   }
   2665 
   2666   if (instr->hydrogen()->representation().IsDouble()) {
   2667     DoubleRegister result = ToDoubleRegister(instr->result());
   2668     __ ldc1(result, FieldMemOperand(object, offset));
   2669     return;
   2670   }
   2671 
   2672   Register result = ToRegister(instr->result());
   2673   if (!access.IsInobject()) {
   2674     __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   2675     object = result;
   2676   }
   2677 
   2678   Representation representation = access.representation();
   2679   if (representation.IsSmi() && SmiValuesAre32Bits() &&
   2680       instr->hydrogen()->representation().IsInteger32()) {
   2681     if (FLAG_debug_code) {
   2682       // Verify this is really an Smi.
   2683       Register scratch = scratch0();
   2684       __ Load(scratch, FieldMemOperand(object, offset), representation);
   2685       __ AssertSmi(scratch);
   2686     }
   2687 
   2688     // Read int value directly from upper half of the smi.
   2689     STATIC_ASSERT(kSmiTag == 0);
   2690     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
   2691     offset = SmiWordOffset(offset);
   2692     representation = Representation::Integer32();
   2693   }
   2694   __ Load(result, FieldMemOperand(object, offset), representation);
   2695 }
   2696 
   2697 
   2698 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2699   Register scratch = scratch0();
   2700   Register function = ToRegister(instr->function());
   2701   Register result = ToRegister(instr->result());
   2702 
   2703   // Get the prototype or initial map from the function.
   2704   __ ld(result,
   2705          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2706 
   2707   // Check that the function has a prototype or an initial map.
   2708   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2709   DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
   2710 
   2711   // If the function does not have an initial map, we're done.
   2712   Label done;
   2713   __ GetObjectType(result, scratch, scratch);
   2714   __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
   2715 
   2716   // Get the prototype from the initial map.
   2717   __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2718 
   2719   // All done.
   2720   __ bind(&done);
   2721 }
   2722 
   2723 
   2724 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   2725   Register result = ToRegister(instr->result());
   2726   __ LoadRoot(result, instr->index());
   2727 }
   2728 
   2729 
   2730 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   2731   Register arguments = ToRegister(instr->arguments());
   2732   Register result = ToRegister(instr->result());
   2733   // There are two words between the frame pointer and the last argument.
    2734   // Subtracting from length accounts for one of them; adding one covers the other.
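           // E.g. with length L and index i, the argument is read from
           // MemOperand(arguments, (L - i + 1) * kPointerSize); each branch
           // below computes that address for its mix of operands.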
   2735   if (instr->length()->IsConstantOperand()) {
   2736     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2737     if (instr->index()->IsConstantOperand()) {
   2738       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2739       int index = (const_length - const_index) + 1;
   2740       __ ld(result, MemOperand(arguments, index * kPointerSize));
   2741     } else {
   2742       Register index = ToRegister(instr->index());
   2743       __ li(at, Operand(const_length + 1));
   2744       __ Dsubu(result, at, index);
   2745       __ Dlsa(at, arguments, result, kPointerSizeLog2);
   2746       __ ld(result, MemOperand(at));
   2747     }
   2748   } else if (instr->index()->IsConstantOperand()) {
   2749     Register length = ToRegister(instr->length());
   2750     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2751     int loc = const_index - 1;
   2752     if (loc != 0) {
   2753       __ Dsubu(result, length, Operand(loc));
   2754       __ Dlsa(at, arguments, result, kPointerSizeLog2);
   2755       __ ld(result, MemOperand(at));
   2756     } else {
   2757       __ Dlsa(at, arguments, length, kPointerSizeLog2);
   2758       __ ld(result, MemOperand(at));
   2759     }
   2760   } else {
   2761     Register length = ToRegister(instr->length());
   2762     Register index = ToRegister(instr->index());
   2763     __ Dsubu(result, length, index);
   2764     __ Daddu(result, result, 1);
   2765     __ Dlsa(at, arguments, result, kPointerSizeLog2);
   2766     __ ld(result, MemOperand(at));
   2767   }
   2768 }
   2769 
   2770 
   2771 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2772   Register external_pointer = ToRegister(instr->elements());
   2773   Register key = no_reg;
   2774   ElementsKind elements_kind = instr->elements_kind();
   2775   bool key_is_constant = instr->key()->IsConstantOperand();
   2776   int constant_key = 0;
   2777   if (key_is_constant) {
   2778     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2779     if (constant_key & 0xF0000000) {
   2780       Abort(kArrayIndexConstantValueTooBig);
   2781     }
   2782   } else {
   2783     key = ToRegister(instr->key());
   2784   }
   2785   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   2786   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   2787       ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
   2788       : element_size_shift;
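           // A negative shift_size means the key is still a tagged smi: e.g.
           // for FLOAT64_ELEMENTS, element_size_shift is 3 and the smi shift
           // is 32, so shift_size == -29 and a single arithmetic right shift
           // both untags the key and scales it to a byte offset.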
   2789   int base_offset = instr->base_offset();
   2790 
   2791   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   2792     FPURegister result = ToDoubleRegister(instr->result());
   2793     if (key_is_constant) {
   2794       __ Daddu(scratch0(), external_pointer,
   2795           constant_key << element_size_shift);
   2796     } else {
   2797       if (shift_size < 0) {
    2798         if (shift_size == -32) {
    2799           __ dsra32(scratch0(), key, 0);
    2800         } else {
    2801           __ dsra(scratch0(), key, -shift_size);
    2802         }
   2803       } else {
   2804         __ dsll(scratch0(), key, shift_size);
   2805       }
   2806       __ Daddu(scratch0(), scratch0(), external_pointer);
   2807     }
   2808     if (elements_kind == FLOAT32_ELEMENTS) {
   2809       __ lwc1(result, MemOperand(scratch0(), base_offset));
   2810       __ cvt_d_s(result, result);
    2811     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
   2812       __ ldc1(result, MemOperand(scratch0(), base_offset));
   2813     }
   2814   } else {
   2815     Register result = ToRegister(instr->result());
   2816     MemOperand mem_operand = PrepareKeyedOperand(
   2817         key, external_pointer, key_is_constant, constant_key,
   2818         element_size_shift, shift_size, base_offset);
   2819     switch (elements_kind) {
   2820       case INT8_ELEMENTS:
   2821         __ lb(result, mem_operand);
   2822         break;
   2823       case UINT8_ELEMENTS:
   2824       case UINT8_CLAMPED_ELEMENTS:
   2825         __ lbu(result, mem_operand);
   2826         break;
   2827       case INT16_ELEMENTS:
   2828         __ lh(result, mem_operand);
   2829         break;
   2830       case UINT16_ELEMENTS:
   2831         __ lhu(result, mem_operand);
   2832         break;
   2833       case INT32_ELEMENTS:
   2834         __ lw(result, mem_operand);
   2835         break;
   2836       case UINT32_ELEMENTS:
   2837         __ lw(result, mem_operand);
   2838         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   2839           DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
   2840                        result, Operand(0x80000000));
   2841         }
   2842         break;
   2843       case FLOAT32_ELEMENTS:
   2844       case FLOAT64_ELEMENTS:
   2845       case FAST_DOUBLE_ELEMENTS:
   2846       case FAST_ELEMENTS:
   2847       case FAST_SMI_ELEMENTS:
   2848       case FAST_HOLEY_DOUBLE_ELEMENTS:
   2849       case FAST_HOLEY_ELEMENTS:
   2850       case FAST_HOLEY_SMI_ELEMENTS:
   2851       case DICTIONARY_ELEMENTS:
   2852       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   2853       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   2854       case FAST_STRING_WRAPPER_ELEMENTS:
   2855       case SLOW_STRING_WRAPPER_ELEMENTS:
   2856       case NO_ELEMENTS:
   2857         UNREACHABLE();
   2858         break;
   2859     }
   2860   }
   2861 }
   2862 
   2863 
   2864 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   2865   Register elements = ToRegister(instr->elements());
   2866   bool key_is_constant = instr->key()->IsConstantOperand();
   2867   Register key = no_reg;
   2868   DoubleRegister result = ToDoubleRegister(instr->result());
   2869   Register scratch = scratch0();
   2870 
   2871   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   2872 
   2873   int base_offset = instr->base_offset();
   2874   if (key_is_constant) {
   2875     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2876     if (constant_key & 0xF0000000) {
   2877       Abort(kArrayIndexConstantValueTooBig);
   2878     }
   2879     base_offset += constant_key * kDoubleSize;
   2880   }
   2881   __ Daddu(scratch, elements, Operand(base_offset));
   2882 
   2883   if (!key_is_constant) {
   2884     key = ToRegister(instr->key());
   2885     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   2886         ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
   2887         : element_size_shift;
   2888     if (shift_size > 0) {
   2889       __ dsll(at, key, shift_size);
   2890     } else if (shift_size == -32) {
   2891       __ dsra32(at, key, 0);
   2892     } else {
   2893       __ dsra(at, key, -shift_size);
   2894     }
   2895     __ Daddu(scratch, scratch, at);
   2896   }
   2897 
   2898   __ ldc1(result, MemOperand(scratch));
   2899 
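           // The hole in a double array is a NaN with a distinguished upper
           // word, so comparing the high 32 bits of the loaded value against
           // kHoleNanUpper32 detects it without a floating-point compare.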
   2900   if (instr->hydrogen()->RequiresHoleCheck()) {
   2901     __ FmoveHigh(scratch, result);
   2902     DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
   2903                  Operand(static_cast<int32_t>(kHoleNanUpper32)));
   2904   }
   2905 }
   2906 
   2907 
   2908 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   2909   HLoadKeyed* hinstr = instr->hydrogen();
   2910   Register elements = ToRegister(instr->elements());
   2911   Register result = ToRegister(instr->result());
   2912   Register scratch = scratch0();
   2913   Register store_base = scratch;
   2914   int offset = instr->base_offset();
   2915 
   2916   if (instr->key()->IsConstantOperand()) {
   2917     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   2918     offset += ToInteger32(const_operand) * kPointerSize;
   2919     store_base = elements;
   2920   } else {
   2921     Register key = ToRegister(instr->key());
   2922     // Even though the HLoadKeyed instruction forces the input
   2923     // representation for the key to be an integer, the input gets replaced
    2924     // during bounds check elimination with the index argument to the bounds
   2925     // check, which can be tagged, so that case must be handled here, too.
   2926     if (instr->hydrogen()->key()->representation().IsSmi()) {
    2927       __ SmiScale(scratch, key, kPointerSizeLog2);
    2928       __ daddu(scratch, elements, scratch);
   2929     } else {
   2930       __ Dlsa(scratch, elements, key, kPointerSizeLog2);
   2931     }
   2932   }
   2933 
   2934   Representation representation = hinstr->representation();
   2935   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
   2936       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
   2937     DCHECK(!hinstr->RequiresHoleCheck());
   2938     if (FLAG_debug_code) {
   2939       Register temp = scratch1();
   2940       __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
   2941       __ AssertSmi(temp);
   2942     }
   2943 
   2944     // Read int value directly from upper half of the smi.
   2945     STATIC_ASSERT(kSmiTag == 0);
   2946     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
   2947     offset = SmiWordOffset(offset);
   2948   }
   2949 
   2950   __ Load(result, MemOperand(store_base, offset), representation);
   2951 
   2952   // Check for the hole value.
   2953   if (hinstr->RequiresHoleCheck()) {
   2954     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   2955       __ SmiTst(result, scratch);
   2956       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
   2957                    Operand(zero_reg));
   2958     } else {
   2959       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   2960       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
   2961                    Operand(scratch));
   2962     }
   2963   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   2964     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   2965     Label done;
   2966     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   2967     __ Branch(&done, ne, result, Operand(scratch));
   2968     if (info()->IsStub()) {
   2969       // A stub can safely convert the hole to undefined only if the array
   2970       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
   2971       // it needs to bail out.
   2972       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
    2973       // The comparison only needs the low bits of the value, which is a smi.
   2974       __ ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
   2975       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
   2976                    Operand(Smi::FromInt(Isolate::kProtectorValid)));
   2977     }
   2978     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2979     __ bind(&done);
   2980   }
   2981 }
   2982 
   2983 
   2984 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   2985   if (instr->is_fixed_typed_array()) {
   2986     DoLoadKeyedExternalArray(instr);
   2987   } else if (instr->hydrogen()->representation().IsDouble()) {
   2988     DoLoadKeyedFixedDoubleArray(instr);
   2989   } else {
   2990     DoLoadKeyedFixedArray(instr);
   2991   }
   2992 }
   2993 
   2994 
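         // Computes the address of a typed-array element: a constant key folds
         // into the MemOperand immediate, while a register key is shifted into
         // scratch0() (right shifts untag smi keys) and added to the base.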
   2995 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   2996                                          Register base,
   2997                                          bool key_is_constant,
   2998                                          int constant_key,
   2999                                          int element_size,
   3000                                          int shift_size,
   3001                                          int base_offset) {
   3002   if (key_is_constant) {
   3003     return MemOperand(base, (constant_key << element_size) + base_offset);
   3004   }
   3005 
   3006   if (base_offset == 0) {
   3007     if (shift_size >= 0) {
   3008       __ dsll(scratch0(), key, shift_size);
   3009       __ Daddu(scratch0(), base, scratch0());
   3010       return MemOperand(scratch0());
   3011     } else {
   3012       if (shift_size == -32) {
   3013         __ dsra32(scratch0(), key, 0);
   3014       } else {
   3015         __ dsra(scratch0(), key, -shift_size);
   3016       }
   3017       __ Daddu(scratch0(), base, scratch0());
   3018       return MemOperand(scratch0());
   3019     }
   3020   }
   3021 
   3022   if (shift_size >= 0) {
   3023     __ dsll(scratch0(), key, shift_size);
   3024     __ Daddu(scratch0(), base, scratch0());
   3025     return MemOperand(scratch0(), base_offset);
   3026   } else {
   3027     if (shift_size == -32) {
    3028       __ dsra32(scratch0(), key, 0);
   3029     } else {
   3030       __ dsra(scratch0(), key, -shift_size);
   3031     }
   3032     __ Daddu(scratch0(), base, scratch0());
   3033     return MemOperand(scratch0(), base_offset);
   3034   }
   3035 }
   3036 
   3037 
   3038 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3039   Register scratch = scratch0();
   3040   Register temp = scratch1();
   3041   Register result = ToRegister(instr->result());
   3042 
   3043   if (instr->hydrogen()->from_inlined()) {
   3044     __ Dsubu(result, sp, 2 * kPointerSize);
   3045   } else if (instr->hydrogen()->arguments_adaptor()) {
   3046     // Check if the calling frame is an arguments adaptor frame.
   3047     Label done, adapted;
   3048     __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3049     __ ld(result,
   3050           MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
   3051     __ Xor(temp, result,
   3052            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   3053 
    3054     // Result is the frame pointer of this frame if not adapted, or of the
    3055     // real frame below the adaptor frame if adapted.
   3056     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
   3057     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
   3058   } else {
   3059     __ mov(result, fp);
   3060   }
   3061 }
   3062 
   3063 
   3064 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3065   Register elem = ToRegister(instr->elements());
   3066   Register result = ToRegister(instr->result());
   3067 
   3068   Label done;
   3069 
    3070   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3071   __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
   3072   __ Branch(&done, eq, fp, Operand(elem));
   3073 
   3074   // Arguments adaptor frame present. Get argument length from there.
   3075   __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3076   __ ld(result,
   3077         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3078   __ SmiUntag(result);
   3079 
   3080   // Argument length is in result register.
   3081   __ bind(&done);
   3082 }
   3083 
   3084 
   3085 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3086   Register receiver = ToRegister(instr->receiver());
   3087   Register function = ToRegister(instr->function());
   3088   Register result = ToRegister(instr->result());
   3089   Register scratch = scratch0();
   3090 
   3091   // If the receiver is null or undefined, we have to pass the global
   3092   // object as a receiver to normal functions. Values have to be
   3093   // passed unchanged to builtins and strict-mode functions.
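           // E.g. in sloppy mode (function() { return this; }).call(null)
           // observes the global proxy, whereas a strict-mode or native
           // function observes null unchanged.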
   3094   Label global_object, result_in_receiver;
   3095 
   3096   if (!instr->hydrogen()->known_function()) {
   3097     // Do not transform the receiver to object for strict mode functions.
   3098     __ ld(scratch,
   3099            FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3100 
   3101     // Do not transform the receiver to object for builtins.
   3102     int32_t strict_mode_function_mask =
    3103         1 << SharedFunctionInfo::kStrictModeBitWithinByte;
   3104     int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
   3105 
   3106     __ lbu(at,
   3107            FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
   3108     __ And(at, at, Operand(strict_mode_function_mask));
   3109     __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
   3110     __ lbu(at,
   3111            FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
   3112     __ And(at, at, Operand(native_mask));
   3113     __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
   3114   }
   3115 
   3116   // Normal function. Replace undefined or null with global receiver.
   3117   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3118   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3119   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3120   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3121 
   3122   // Deoptimize if the receiver is not a JS object.
   3123   __ SmiTst(receiver, scratch);
   3124   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
   3125 
   3126   __ GetObjectType(receiver, scratch, scratch);
   3127   DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
   3128                Operand(FIRST_JS_RECEIVER_TYPE));
   3129   __ Branch(&result_in_receiver);
   3130 
   3131   __ bind(&global_object);
   3132   __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3133   __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
   3134   __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
   3135 
   3136   if (result.is(receiver)) {
   3137     __ bind(&result_in_receiver);
   3138   } else {
   3139     Label result_ok;
   3140     __ Branch(&result_ok);
   3141     __ bind(&result_in_receiver);
   3142     __ mov(result, receiver);
   3143     __ bind(&result_ok);
   3144   }
   3145 }
   3146 
   3147 
   3148 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3149   Register receiver = ToRegister(instr->receiver());
   3150   Register function = ToRegister(instr->function());
   3151   Register length = ToRegister(instr->length());
   3152   Register elements = ToRegister(instr->elements());
   3153   Register scratch = scratch0();
   3154   DCHECK(receiver.is(a0));  // Used for parameter count.
   3155   DCHECK(function.is(a1));  // Required by InvokeFunction.
   3156   DCHECK(ToRegister(instr->result()).is(v0));
   3157 
   3158   // Copy the arguments to this function possibly from the
   3159   // adaptor frame below it.
   3160   const uint32_t kArgumentsLimit = 1 * KB;
   3161   DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
   3162                Operand(kArgumentsLimit));
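           // Pushing an unbounded number of arguments could overflow the
           // stack, so argument counts above kArgumentsLimit deoptimize
           // instead of being applied here.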
   3163 
   3164   // Push the receiver and use the register to keep the original
   3165   // number of arguments.
   3166   __ push(receiver);
   3167   __ Move(receiver, length);
    3168   // The arguments start one pointer size past the elements base.
   3169   __ Daddu(elements, elements, Operand(1 * kPointerSize));
   3170 
   3171   // Loop through the arguments pushing them onto the execution
   3172   // stack.
   3173   Label invoke, loop;
   3174   // length is a small non-negative integer, due to the test above.
   3175   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
   3176   __ dsll(scratch, length, kPointerSizeLog2);
   3177   __ bind(&loop);
   3178   __ Daddu(scratch, elements, scratch);
   3179   __ ld(scratch, MemOperand(scratch));
   3180   __ push(scratch);
   3181   __ Dsubu(length, length, Operand(1));
   3182   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
   3183   __ dsll(scratch, length, kPointerSizeLog2);
   3184 
   3185   __ bind(&invoke);
   3186 
   3187   InvokeFlag flag = CALL_FUNCTION;
   3188   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
   3189     DCHECK(!info()->saves_caller_doubles());
   3190     // TODO(ishell): drop current frame before pushing arguments to the stack.
   3191     flag = JUMP_FUNCTION;
   3192     ParameterCount actual(a0);
   3193     // It is safe to use t0, t1 and t2 as scratch registers here given that
    3194     // we are not going to return to the caller function anyway.
   3195     PrepareForTailCall(actual, t0, t1, t2);
   3196   }
   3197 
   3198   DCHECK(instr->HasPointerMap());
   3199   LPointerMap* pointers = instr->pointer_map();
   3200   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
    3201   // The number of arguments is stored in receiver, which is a0, as expected
   3202   // by InvokeFunction.
   3203   ParameterCount actual(receiver);
   3204   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
   3205 }
   3206 
   3207 
   3208 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3209   LOperand* argument = instr->value();
   3210   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3211     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3212   } else {
   3213     Register argument_reg = EmitLoadRegister(argument, at);
   3214     __ push(argument_reg);
   3215   }
   3216 }
   3217 
   3218 
   3219 void LCodeGen::DoDrop(LDrop* instr) {
   3220   __ Drop(instr->count());
   3221 }
   3222 
   3223 
   3224 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3225   Register result = ToRegister(instr->result());
   3226   __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3227 }
   3228 
   3229 
   3230 void LCodeGen::DoContext(LContext* instr) {
   3231   // If there is a non-return use, the context must be moved to a register.
   3232   Register result = ToRegister(instr->result());
   3233   if (info()->IsOptimizing()) {
   3234     __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3235   } else {
   3236     // If there is no frame, the context must be in cp.
   3237     DCHECK(result.is(cp));
   3238   }
   3239 }
   3240 
   3241 
   3242 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3243   DCHECK(ToRegister(instr->context()).is(cp));
   3244   __ li(scratch0(), instr->hydrogen()->declarations());
   3245   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   3246   __ Push(scratch0(), scratch1());
   3247   __ li(scratch0(), instr->hydrogen()->feedback_vector());
   3248   __ Push(scratch0());
   3249   CallRuntime(Runtime::kDeclareGlobals, instr);
   3250 }
   3251 
   3252 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3253                                  int formal_parameter_count, int arity,
   3254                                  bool is_tail_call, LInstruction* instr) {
   3255   bool dont_adapt_arguments =
   3256       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3257   bool can_invoke_directly =
   3258       dont_adapt_arguments || formal_parameter_count == arity;
   3259 
   3260   Register function_reg = a1;
   3261   LPointerMap* pointers = instr->pointer_map();
   3262 
   3263   if (can_invoke_directly) {
   3264     // Change context.
   3265     __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   3266 
   3267     // Always initialize new target and number of actual arguments.
   3268     __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
   3269     __ li(a0, Operand(arity));
   3270 
   3271     bool is_self_call = function.is_identical_to(info()->closure());
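             // A self-call can target the code object currently being
             // generated; the reference is patched in when the code is
             // finalized, avoiding the code-entry load from the JSFunction.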
   3272 
   3273     // Invoke function.
   3274     if (is_self_call) {
   3275       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
   3276       if (is_tail_call) {
   3277         __ Jump(self, RelocInfo::CODE_TARGET);
   3278       } else {
   3279         __ Call(self, RelocInfo::CODE_TARGET);
   3280       }
   3281     } else {
   3282       __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   3283       if (is_tail_call) {
   3284         __ Jump(at);
   3285       } else {
   3286         __ Call(at);
   3287       }
   3288     }
   3289 
   3290     if (!is_tail_call) {
   3291       // Set up deoptimization.
   3292       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3293     }
   3294   } else {
   3295     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3296     ParameterCount actual(arity);
   3297     ParameterCount expected(formal_parameter_count);
   3298     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3299     __ InvokeFunction(function_reg, expected, actual, flag, generator);
   3300   }
   3301 }
   3302 
   3303 
   3304 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3305   DCHECK(instr->context() != NULL);
   3306   DCHECK(ToRegister(instr->context()).is(cp));
   3307   Register input = ToRegister(instr->value());
   3308   Register result = ToRegister(instr->result());
   3309   Register scratch = scratch0();
   3310 
   3311   // Deoptimize if not a heap number.
   3312   __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3313   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3314   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
   3315                Operand(at));
   3316 
   3317   Label done;
   3318   Register exponent = scratch0();
   3319   scratch = no_reg;
   3320   __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3321   // Check the sign of the argument. If the argument is positive, just
   3322   // return it.
   3323   __ Move(result, input);
   3324   __ And(at, exponent, Operand(HeapNumber::kSignMask));
   3325   __ Branch(&done, eq, at, Operand(zero_reg));
   3326 
   3327   // Input is negative. Reverse its sign.
   3328   // Preserve the value of all registers.
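           // HeapNumbers are immutable, so allocate a fresh one and copy the
           // payload over with the sign bit cleared instead of flipping the
           // sign of the input in place.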
   3329   {
   3330     PushSafepointRegistersScope scope(this);
   3331 
   3332     // Registers were saved at the safepoint, so we can use
   3333     // many scratch registers.
   3334     Register tmp1 = input.is(a1) ? a0 : a1;
   3335     Register tmp2 = input.is(a2) ? a0 : a2;
   3336     Register tmp3 = input.is(a3) ? a0 : a3;
   3337     Register tmp4 = input.is(a4) ? a0 : a4;
   3338 
   3339     // exponent: floating point exponent value.
   3340 
   3341     Label allocated, slow;
   3342     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3343     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3344     __ Branch(&allocated);
   3345 
   3346     // Slow case: Call the runtime system to do the number allocation.
   3347     __ bind(&slow);
   3348 
   3349     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3350                             instr->context());
    3351     // Set the pointer to the new heap number in tmp1.
   3352     if (!tmp1.is(v0))
   3353       __ mov(tmp1, v0);
   3354     // Restore input_reg after call to runtime.
   3355     __ LoadFromSafepointRegisterSlot(input, input);
   3356     __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3357 
   3358     __ bind(&allocated);
   3359     // exponent: floating point exponent value.
   3360     // tmp1: allocated heap number.
   3361     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
   3362     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3363     __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3364     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3365 
   3366     __ StoreToSafepointRegisterSlot(tmp1, result);
   3367   }
   3368 
   3369   __ bind(&done);
   3370 }
   3371 
   3372 
   3373 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3374   Register input = ToRegister(instr->value());
   3375   Register result = ToRegister(instr->result());
   3376   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   3377   Label done;
   3378   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
   3379   __ mov(result, input);
   3380   __ subu(result, zero_reg, input);
   3381   // Overflow if result is still negative, i.e. 0x80000000.
   3382   DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
   3383                Operand(zero_reg));
   3384   __ bind(&done);
   3385 }
   3386 
   3387 
   3388 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
   3389   Register input = ToRegister(instr->value());
   3390   Register result = ToRegister(instr->result());
   3391   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   3392   Label done;
   3393   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
   3394   __ mov(result, input);
   3395   __ dsubu(result, zero_reg, input);
   3396   // Overflow if result is still negative, i.e. 0x80000000 00000000.
   3397   DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
   3398                Operand(zero_reg));
   3399   __ bind(&done);
   3400 }
   3401 
   3402 
   3403 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3404   // Class for deferred case.
   3405   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   3406    public:
   3407     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3408         : LDeferredCode(codegen), instr_(instr) { }
   3409     void Generate() override {
   3410       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3411     }
   3412     LInstruction* instr() override { return instr_; }
   3413 
   3414    private:
   3415     LMathAbs* instr_;
   3416   };
   3417 
   3418   Representation r = instr->hydrogen()->value()->representation();
   3419   if (r.IsDouble()) {
   3420     FPURegister input = ToDoubleRegister(instr->value());
   3421     FPURegister result = ToDoubleRegister(instr->result());
   3422     __ abs_d(result, input);
   3423   } else if (r.IsInteger32()) {
   3424     EmitIntegerMathAbs(instr);
   3425   } else if (r.IsSmi()) {
   3426     EmitSmiMathAbs(instr);
   3427   } else {
   3428     // Representation is tagged.
   3429     DeferredMathAbsTaggedHeapNumber* deferred =
   3430         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3431     Register input = ToRegister(instr->value());
   3432     // Smi check.
   3433     __ JumpIfNotSmi(input, deferred->entry());
   3434     // If smi, handle it directly.
   3435     EmitSmiMathAbs(instr);
   3436     __ bind(deferred->exit());
   3437   }
   3438 }
   3439 
   3440 
   3441 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3442   DoubleRegister input = ToDoubleRegister(instr->value());
   3443   Register result = ToRegister(instr->result());
   3444   Register scratch1 = scratch0();
   3445   Register except_flag = ToRegister(instr->temp());
   3446 
   3447   __ EmitFPUTruncate(kRoundToMinusInf,
   3448                      result,
   3449                      input,
   3450                      scratch1,
   3451                      double_scratch0(),
   3452                      except_flag);
   3453 
   3454   // Deopt if the operation did not succeed.
   3455   DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
   3456                Operand(zero_reg));
   3457 
   3458   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3459     // Test for -0.
   3460     Label done;
   3461     __ Branch(&done, ne, result, Operand(zero_reg));
   3462     __ mfhc1(scratch1, input);  // Get exponent/sign bits.
   3463     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   3464     DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
   3465                  Operand(zero_reg));
   3466     __ bind(&done);
   3467   }
   3468 }
   3469 
   3470 
   3471 void LCodeGen::DoMathRound(LMathRound* instr) {
   3472   DoubleRegister input = ToDoubleRegister(instr->value());
   3473   Register result = ToRegister(instr->result());
   3474   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3475   Register scratch = scratch0();
   3476   Label done, check_sign_on_zero;
   3477 
   3478   // Extract exponent bits.
   3479   __ mfhc1(result, input);
   3480   __ Ext(scratch,
   3481          result,
   3482          HeapNumber::kExponentShift,
   3483          HeapNumber::kExponentBits);
   3484 
   3485   // If the number is in ]-0.5, +0.5[, the result is +/- 0.
   3486   Label skip1;
   3487   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
   3488   __ mov(result, zero_reg);
   3489   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3490     __ Branch(&check_sign_on_zero);
   3491   } else {
   3492     __ Branch(&done);
   3493   }
   3494   __ bind(&skip1);
   3495 
   3496   // The following conversion will not work with numbers
   3497   // outside of ]-2^32, 2^32[.
   3498   DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
   3499                Operand(HeapNumber::kExponentBias + 32));
   3500 
   3501   // Save the original sign for later comparison.
   3502   __ And(scratch, result, Operand(HeapNumber::kSignMask));
   3503 
   3504   __ Move(double_scratch0(), 0.5);
   3505   __ add_d(double_scratch0(), input, double_scratch0());
   3506 
   3507   // Check sign of the result: if the sign changed, the input
    3508   // value was in ]-0.5, 0[ and the result should be -0.
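           // E.g. for input -0.3: -0.3 + 0.5 == +0.2 flips the sign bit, so
           // the xor with the saved sign below comes out negative.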
   3509   __ mfhc1(result, double_scratch0());
   3510   // mfhc1 sign-extends, clear the upper bits.
    3511   // mfhc1 sign-extends, so clear the upper bits.
   3512   __ dsrl32(result, result, 0);
   3513   __ Xor(result, result, Operand(scratch));
   3514   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    3515     // ARM uses 'mi' here, which is 'lt'.
   3516     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
   3517                  Operand(zero_reg));
   3518   } else {
   3519     Label skip2;
    3520     // ARM uses 'mi' here, which is 'lt';
    3521     // negating it results in 'ge'.
   3522     __ Branch(&skip2, ge, result, Operand(zero_reg));
   3523     __ mov(result, zero_reg);
   3524     __ Branch(&done);
   3525     __ bind(&skip2);
   3526   }
   3527 
   3528   Register except_flag = scratch;
   3529   __ EmitFPUTruncate(kRoundToMinusInf,
   3530                      result,
   3531                      double_scratch0(),
   3532                      at,
   3533                      double_scratch1,
   3534                      except_flag);
   3535 
   3536   DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
   3537                Operand(zero_reg));
   3538 
   3539   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3540     // Test for -0.
   3541     __ Branch(&done, ne, result, Operand(zero_reg));
   3542     __ bind(&check_sign_on_zero);
   3543     __ mfhc1(scratch, input);  // Get exponent/sign bits.
   3544     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
   3545     DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
   3546                  Operand(zero_reg));
   3547   }
   3548   __ bind(&done);
   3549 }
   3550 
   3551 
   3552 void LCodeGen::DoMathFround(LMathFround* instr) {
   3553   DoubleRegister input = ToDoubleRegister(instr->value());
   3554   DoubleRegister result = ToDoubleRegister(instr->result());
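           // Math.fround rounds to the nearest float32; converting to single
           // precision and back performs exactly that rounding.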
   3555   __ cvt_s_d(result, input);
   3556   __ cvt_d_s(result, result);
   3557 }
   3558 
   3559 
   3560 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3561   DoubleRegister input = ToDoubleRegister(instr->value());
   3562   DoubleRegister result = ToDoubleRegister(instr->result());
   3563   __ sqrt_d(result, input);
   3564 }
   3565 
   3566 
   3567 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3568   DoubleRegister input = ToDoubleRegister(instr->value());
   3569   DoubleRegister result = ToDoubleRegister(instr->result());
   3570   DoubleRegister temp = ToDoubleRegister(instr->temp());
   3571 
   3572   DCHECK(!input.is(result));
   3573 
   3574   // Note that according to ECMA-262 15.8.2.13:
   3575   // Math.pow(-Infinity, 0.5) == Infinity
   3576   // Math.sqrt(-Infinity) == NaN
   3577   Label done;
   3578   __ Move(temp, static_cast<double>(-V8_INFINITY));
   3579   // Set up Infinity.
   3580   __ Neg_d(result, temp);
   3581   // result is overwritten if the branch is not taken.
   3582   __ BranchF(&done, NULL, eq, temp, input);
   3583 
    3584   // Add +0 to convert -0 to +0: Math.pow(-0, 0.5) is +0, but sqrt(-0) is -0.
   3585   __ add_d(result, input, kDoubleRegZero);
   3586   __ sqrt_d(result, result);
   3587   __ bind(&done);
   3588 }
   3589 
   3590 
   3591 void LCodeGen::DoPower(LPower* instr) {
   3592   Representation exponent_type = instr->hydrogen()->right()->representation();
   3593   // Having marked this as a call, we can use any registers.
   3594   // Just make sure that the input/output registers are the expected ones.
   3595   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3596   DCHECK(!instr->right()->IsDoubleRegister() ||
   3597          ToDoubleRegister(instr->right()).is(f4));
   3598   DCHECK(!instr->right()->IsRegister() ||
   3599          ToRegister(instr->right()).is(tagged_exponent));
   3600   DCHECK(ToDoubleRegister(instr->left()).is(f2));
   3601   DCHECK(ToDoubleRegister(instr->result()).is(f0));
   3602 
   3603   if (exponent_type.IsSmi()) {
   3604     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3605     __ CallStub(&stub);
   3606   } else if (exponent_type.IsTagged()) {
   3607     Label no_deopt;
   3608     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3609     DCHECK(!a7.is(tagged_exponent));
   3610     __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
   3611     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3612     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
   3613     __ bind(&no_deopt);
   3614     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3615     __ CallStub(&stub);
   3616   } else if (exponent_type.IsInteger32()) {
   3617     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3618     __ CallStub(&stub);
   3619   } else {
   3620     DCHECK(exponent_type.IsDouble());
   3621     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3622     __ CallStub(&stub);
   3623   }
   3624 }
   3625 
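         // The ieee754 helpers below are plain C functions: PrepareCallCFunction
         // sets up zero integer and one double argument, and the operand and
         // result travel in FP registers per the MIPS64 calling convention.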
   3626 void LCodeGen::DoMathCos(LMathCos* instr) {
   3627   __ PrepareCallCFunction(0, 1, scratch0());
   3628   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3629   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
   3630   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3631 }
   3632 
   3633 void LCodeGen::DoMathSin(LMathSin* instr) {
   3634   __ PrepareCallCFunction(0, 1, scratch0());
   3635   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3636   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
   3637   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3638 }
   3639 
   3640 void LCodeGen::DoMathExp(LMathExp* instr) {
   3641   __ PrepareCallCFunction(0, 1, scratch0());
   3642   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3643   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
   3644   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3645 }
   3646 
   3647 
   3648 void LCodeGen::DoMathLog(LMathLog* instr) {
   3649   __ PrepareCallCFunction(0, 1, scratch0());
   3650   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3651   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   3652   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3653 }
   3654 
   3655 
   3656 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3657   Register input = ToRegister(instr->value());
   3658   Register result = ToRegister(instr->result());
   3659   __ Clz(result, input);
   3660 }
   3661 
   3662 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
   3663                                   Register scratch1, Register scratch2,
   3664                                   Register scratch3) {
   3665 #if DEBUG
   3666   if (actual.is_reg()) {
   3667     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
   3668   } else {
   3669     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
   3670   }
   3671 #endif
   3672   if (FLAG_code_comments) {
   3673     if (actual.is_reg()) {
   3674       Comment(";;; PrepareForTailCall, actual: %s {",
   3675               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
   3676                   actual.reg().code()));
   3677     } else {
   3678       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
   3679     }
   3680   }
   3681 
   3682   // Check if next frame is an arguments adaptor frame.
   3683   Register caller_args_count_reg = scratch1;
   3684   Label no_arguments_adaptor, formal_parameter_count_loaded;
   3685   __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3686   __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
   3687   __ Branch(&no_arguments_adaptor, ne, scratch3,
   3688             Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
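           // If the caller was invoked through an arguments adaptor, the tail
           // call has to drop that frame too, so the adapted argument count is
           // what gets used to relocate the new arguments.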
   3689 
   3690   // Drop current frame and load arguments count from arguments adaptor frame.
   3691   __ mov(fp, scratch2);
   3692   __ ld(caller_args_count_reg,
   3693         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3694   __ SmiUntag(caller_args_count_reg);
   3695   __ Branch(&formal_parameter_count_loaded);
   3696 
   3697   __ bind(&no_arguments_adaptor);
    3698   // Load the caller's formal parameter count.
   3699   __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
   3700 
   3701   __ bind(&formal_parameter_count_loaded);
   3702   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
   3703 
   3704   Comment(";;; }");
   3705 }
   3706 
   3707 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3708   HInvokeFunction* hinstr = instr->hydrogen();
   3709   DCHECK(ToRegister(instr->context()).is(cp));
   3710   DCHECK(ToRegister(instr->function()).is(a1));
   3711   DCHECK(instr->HasPointerMap());
   3712 
   3713   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
   3714 
   3715   if (is_tail_call) {
   3716     DCHECK(!info()->saves_caller_doubles());
   3717     ParameterCount actual(instr->arity());
   3718     // It is safe to use t0, t1 and t2 as scratch registers here given that
    3719     // we are not going to return to the caller function anyway.
   3720     PrepareForTailCall(actual, t0, t1, t2);
   3721   }
   3722 
   3723   Handle<JSFunction> known_function = hinstr->known_function();
   3724   if (known_function.is_null()) {
   3725     LPointerMap* pointers = instr->pointer_map();
   3726     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3727     ParameterCount actual(instr->arity());
   3728     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3729     __ InvokeFunction(a1, no_reg, actual, flag, generator);
   3730   } else {
   3731     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
   3732                       instr->arity(), is_tail_call, instr);
   3733   }
   3734 }
   3735 
   3736 
   3737 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3738   DCHECK(ToRegister(instr->result()).is(v0));
   3739 
   3740   if (instr->hydrogen()->IsTailCall()) {
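             // A tail call reuses the caller's frame, so tear this one down
             // before jumping to the target.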
   3741     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   3742 
   3743     if (instr->target()->IsConstantOperand()) {
   3744       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3745       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3746       __ Jump(code, RelocInfo::CODE_TARGET);
   3747     } else {
   3748       DCHECK(instr->target()->IsRegister());
   3749       Register target = ToRegister(instr->target());
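               // Skip the Code object header (and the heap-object tag) to
               // reach the first instruction.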
   3750       __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3751       __ Jump(target);
   3752     }
   3753   } else {
   3754     LPointerMap* pointers = instr->pointer_map();
   3755     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3756 
   3757     if (instr->target()->IsConstantOperand()) {
   3758       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3759       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3760       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3761       __ Call(code, RelocInfo::CODE_TARGET);
   3762     } else {
   3763       DCHECK(instr->target()->IsRegister());
   3764       Register target = ToRegister(instr->target());
   3765       generator.BeforeCall(__ CallSize(target));
   3766       __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3767       __ Call(target);
   3768     }
   3769     generator.AfterCall();
   3770   }
   3771 }
   3772 
   3773 
   3774 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3775   DCHECK(ToRegister(instr->context()).is(cp));
   3776   DCHECK(ToRegister(instr->constructor()).is(a1));
   3777   DCHECK(ToRegister(instr->result()).is(v0));
   3778 
   3779   __ li(a0, Operand(instr->arity()));
   3780   __ li(a2, instr->hydrogen()->site());
   3781 
   3782   ElementsKind kind = instr->hydrogen()->elements_kind();
   3783   AllocationSiteOverrideMode override_mode =
   3784       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3785           ? DISABLE_ALLOCATION_SITES
   3786           : DONT_OVERRIDE;
   3787 
   3788   if (instr->arity() == 0) {
   3789     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3790     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3791   } else if (instr->arity() == 1) {
   3792     Label done;
   3793     if (IsFastPackedElementsKind(kind)) {
   3794       Label packed_case;
    3795       // We might need the holey variant of the stub;
    3796       // look at the first argument: a nonzero length would create holes.
   3797       __ ld(a5, MemOperand(sp, 0));
   3798       __ Branch(&packed_case, eq, a5, Operand(zero_reg));
   3799 
   3800       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   3801       ArraySingleArgumentConstructorStub stub(isolate(),
   3802                                               holey_kind,
   3803                                               override_mode);
   3804       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3805       __ jmp(&done);
   3806       __ bind(&packed_case);
   3807     }
   3808 
   3809     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   3810     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3811     __ bind(&done);
   3812   } else {
   3813     ArrayNArgumentsConstructorStub stub(isolate());
   3814     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3815   }
   3816 }
   3817 
   3818 
   3819 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3820   CallRuntime(instr->function(), instr->arity(), instr);
   3821 }
   3822 
   3823 
   3824 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   3825   Register function = ToRegister(instr->function());
   3826   Register code_object = ToRegister(instr->code_object());
   3827   __ Daddu(code_object, code_object,
   3828           Operand(Code::kHeaderSize - kHeapObjectTag));
   3829   __ sd(code_object,
   3830         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   3831 }
   3832 
   3833 
   3834 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3835   Register result = ToRegister(instr->result());
   3836   Register base = ToRegister(instr->base_object());
   3837   if (instr->offset()->IsConstantOperand()) {
   3838     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   3839     __ Daddu(result, base, Operand(ToInteger32(offset)));
   3840   } else {
   3841     Register offset = ToRegister(instr->offset());
   3842     __ Daddu(result, base, offset);
   3843   }
   3844 }
   3845 
   3846 
   3847 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3848   Representation representation = instr->representation();
   3849 
   3850   Register object = ToRegister(instr->object());
   3851   Register scratch2 = scratch1();
   3852   Register scratch1 = scratch0();
   3853 
   3854   HObjectAccess access = instr->hydrogen()->access();
   3855   int offset = access.offset();
   3856   if (access.IsExternalMemory()) {
   3857     Register value = ToRegister(instr->value());
   3858     MemOperand operand = MemOperand(object, offset);
   3859     __ Store(value, operand, representation);
   3860     return;
   3861   }
   3862 
   3863   __ AssertNotSmi(object);
   3864 
   3865   DCHECK(!representation.IsSmi() ||
   3866          !instr->value()->IsConstantOperand() ||
   3867          IsSmi(LConstantOperand::cast(instr->value())));
   3868   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   3869     DCHECK(access.IsInobject());
   3870     DCHECK(!instr->hydrogen()->has_transition());
   3871     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   3872     DoubleRegister value = ToDoubleRegister(instr->value());
   3873     __ sdc1(value, FieldMemOperand(object, offset));
   3874     return;
   3875   }
   3876 
   3877   if (instr->hydrogen()->has_transition()) {
   3878     Handle<Map> transition = instr->hydrogen()->transition_map();
   3879     AddDeprecationDependency(transition);
   3880     __ li(scratch1, Operand(transition));
   3881     __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
   3882     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   3883       Register temp = ToRegister(instr->temp());
   3884       // Update the write barrier for the map field.
   3885       __ RecordWriteForMap(object,
   3886                            scratch1,
   3887                            temp,
   3888                            GetRAState(),
   3889                            kSaveFPRegs);
   3890     }
   3891   }
   3892 
   3893   // Do the store.
   3894   Register destination = object;
   3895   if (!access.IsInobject()) {
    3896     destination = scratch1;
   3897     __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3898   }
   3899 
   3900   if (representation.IsSmi() && SmiValuesAre32Bits() &&
   3901       instr->hydrogen()->value()->representation().IsInteger32()) {
   3902     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
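             // Mirror of the load-side trick: the entry is known to hold an
             // initialized smi, so the low (tag) word is already zero and the
             // int32 payload can be written into the upper word directly.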
   3903     if (FLAG_debug_code) {
   3904       __ Load(scratch2, FieldMemOperand(destination, offset), representation);
   3905       __ AssertSmi(scratch2);
   3906     }
   3907     // Store int value directly to upper half of the smi.
   3908     offset = SmiWordOffset(offset);
   3909     representation = Representation::Integer32();
   3910   }
   3911   MemOperand operand = FieldMemOperand(destination, offset);
   3912 
   3913   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   3914     DCHECK(access.IsInobject());
   3915     DoubleRegister value = ToDoubleRegister(instr->value());
   3916     __ sdc1(value, operand);
   3917   } else {
   3918     DCHECK(instr->value()->IsRegister());
   3919     Register value = ToRegister(instr->value());
   3920     __ Store(value, operand, representation);
   3921   }
   3922 
   3923   if (instr->hydrogen()->NeedsWriteBarrier()) {
   3924     // Update the write barrier for the object for in-object properties.
   3925     Register value = ToRegister(instr->value());
   3926     __ RecordWriteField(destination,
   3927                         offset,
   3928                         value,
   3929                         scratch2,
   3930                         GetRAState(),
   3931                         kSaveFPRegs,
   3932                         EMIT_REMEMBERED_SET,
   3933                         instr->hydrogen()->SmiCheckForWriteBarrier(),
   3934                         instr->hydrogen()->PointersToHereCheckForValue());
   3935   }
   3936 }
   3937 
   3938 
   3939 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   3940   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
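           // With allow_equality, index == length is permitted, so deoptimize
           // only on unsigned index > length (hi); otherwise deoptimize on
           // index >= length (hs).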
   3941   Operand operand((int64_t)0);
   3942   Register reg;
   3943   if (instr->index()->IsConstantOperand()) {
   3944     operand = ToOperand(instr->index());
   3945     reg = ToRegister(instr->length());
   3946     cc = CommuteCondition(cc);
   3947   } else {
   3948     reg = ToRegister(instr->index());
   3949     operand = ToOperand(instr->length());
   3950   }
   3951   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   3952     Label done;
   3953     __ Branch(&done, NegateCondition(cc), reg, operand);
   3954     __ stop("eliminated bounds check failed");
   3955     __ bind(&done);
   3956   } else {
   3957     DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
   3958   }
   3959 }
   3960 
   3961 
   3962 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   3963   Register external_pointer = ToRegister(instr->elements());
   3964   Register key = no_reg;
   3965   ElementsKind elements_kind = instr->elements_kind();
   3966   bool key_is_constant = instr->key()->IsConstantOperand();
   3967   int constant_key = 0;
   3968   if (key_is_constant) {
   3969     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3970     if (constant_key & 0xF0000000) {
   3971       Abort(kArrayIndexConstantValueTooBig);
   3972     }
   3973   } else {
   3974     key = ToRegister(instr->key());
   3975   }
   3976   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3977   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3978       ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
   3979       : element_size_shift;
   3980   int base_offset = instr->base_offset();
   3981 
   3982   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   3983     Register address = scratch0();
   3984     FPURegister value(ToDoubleRegister(instr->value()));
   3985     if (key_is_constant) {
   3986       if (constant_key != 0) {
   3987         __ Daddu(address, external_pointer,
   3988                 Operand(constant_key << element_size_shift));
   3989       } else {
   3990         address = external_pointer;
   3991       }
   3992     } else {
   3993       if (shift_size < 0) {
   3994         if (shift_size == -32) {
   3995           __ dsra32(address, key, 0);
   3996         } else {
   3997           __ dsra(address, key, -shift_size);
   3998         }
   3999       } else {
   4000         __ dsll(address, key, shift_size);
   4001       }
   4002       __ Daddu(address, external_pointer, address);
   4003     }
   4004 
   4005     if (elements_kind == FLOAT32_ELEMENTS) {
   4006       __ cvt_s_d(double_scratch0(), value);
   4007       __ swc1(double_scratch0(), MemOperand(address, base_offset));
   4008     } else {  // Storing doubles, not floats.
   4009       __ sdc1(value, MemOperand(address, base_offset));
   4010     }
   4011   } else {
   4012     Register value(ToRegister(instr->value()));
   4013     MemOperand mem_operand = PrepareKeyedOperand(
   4014         key, external_pointer, key_is_constant, constant_key,
   4015         element_size_shift, shift_size,
   4016         base_offset);
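             // UINT8_CLAMPED_ELEMENTS is a plain byte store at this point:
             // clamping of the value was already done by a separate clamp
             // instruction (see DoClampDToUint8 and friends below).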
   4017     switch (elements_kind) {
   4018       case UINT8_ELEMENTS:
   4019       case UINT8_CLAMPED_ELEMENTS:
   4020       case INT8_ELEMENTS:
   4021         __ sb(value, mem_operand);
   4022         break;
   4023       case INT16_ELEMENTS:
   4024       case UINT16_ELEMENTS:
   4025         __ sh(value, mem_operand);
   4026         break;
   4027       case INT32_ELEMENTS:
   4028       case UINT32_ELEMENTS:
   4029         __ sw(value, mem_operand);
   4030         break;
   4031       case FLOAT32_ELEMENTS:
   4032       case FLOAT64_ELEMENTS:
   4033       case FAST_DOUBLE_ELEMENTS:
   4034       case FAST_ELEMENTS:
   4035       case FAST_SMI_ELEMENTS:
   4036       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4037       case FAST_HOLEY_ELEMENTS:
   4038       case FAST_HOLEY_SMI_ELEMENTS:
   4039       case DICTIONARY_ELEMENTS:
   4040       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   4041       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   4042       case FAST_STRING_WRAPPER_ELEMENTS:
   4043       case SLOW_STRING_WRAPPER_ELEMENTS:
   4044       case NO_ELEMENTS:
   4045         UNREACHABLE();
   4046         break;
   4047     }
   4048   }
   4049 }
   4050 
   4051 
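         // Double stores into a FixedDoubleArray may need to canonicalize NaNs:
         // holes in holey double arrays are encoded as one specific NaN bit
         // pattern, so an arbitrary (e.g. signalling) NaN written here could
         // otherwise be mistaken for the hole. See NeedsCanonicalization() below.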
   4052 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4053   DoubleRegister value = ToDoubleRegister(instr->value());
   4054   Register elements = ToRegister(instr->elements());
   4055   Register scratch = scratch0();
   4056   DoubleRegister double_scratch = double_scratch0();
   4057   bool key_is_constant = instr->key()->IsConstantOperand();
   4058   int base_offset = instr->base_offset();
   4059   Label not_nan, done;
   4060 
   4061   // Calculate the effective address of the slot in the array to store the
   4062   // double value.
   4063   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4064   if (key_is_constant) {
   4065     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4066     if (constant_key & 0xF0000000) {
   4067       Abort(kArrayIndexConstantValueTooBig);
   4068     }
   4069     __ Daddu(scratch, elements,
   4070              Operand((constant_key << element_size_shift) + base_offset));
   4071   } else {
   4072     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4073         ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
   4074         : element_size_shift;
   4075     __ Daddu(scratch, elements, Operand(base_offset));
   4076     DCHECK((shift_size == 3) || (shift_size == -29));
   4077     if (shift_size == 3) {
   4078       __ dsll(at, ToRegister(instr->key()), 3);
   4079     } else if (shift_size == -29) {
   4080       __ dsra(at, ToRegister(instr->key()), 29);
   4081     }
   4082     __ Daddu(scratch, scratch, at);
   4083   }
   4084 
   4085   if (instr->NeedsCanonicalization()) {
   4086     __ FPUCanonicalizeNaN(double_scratch, value);
   4087     __ sdc1(double_scratch, MemOperand(scratch, 0));
   4088   } else {
   4089     __ sdc1(value, MemOperand(scratch, 0));
   4090   }
   4091 }
   4092 
   4093 
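         // With 32-bit smis, a smi stores its int32 payload in the upper word of
         // the 64-bit slot (e.g. the integer 5 is the bit pattern
         // 0x0000000500000000) and the lower word is all zeros. The Integer32
         // fast path below exploits this by overwriting only the payload half of
         // an already-initialized smi field; SmiWordOffset() selects that half
         // word for the target endianness.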
   4094 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4095   Register value = ToRegister(instr->value());
   4096   Register elements = ToRegister(instr->elements());
   4097   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
   4098       : no_reg;
   4099   Register scratch = scratch0();
   4100   Register store_base = scratch;
   4101   int offset = instr->base_offset();
   4102 
   4103   // Do the store.
   4104   if (instr->key()->IsConstantOperand()) {
   4105     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   4106     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4107     offset += ToInteger32(const_operand) * kPointerSize;
   4108     store_base = elements;
   4109   } else {
    4110     // Even though the HStoreKeyed instruction forces the input
    4111     // representation for the key to be an integer, the input gets replaced
    4112     // during bounds check elimination with the index argument to the bounds
    4113     // check, which can be tagged, so that case must be handled here, too.
   4114     if (instr->hydrogen()->key()->representation().IsSmi()) {
   4115       __ SmiScale(scratch, key, kPointerSizeLog2);
   4116       __ daddu(store_base, elements, scratch);
   4117     } else {
   4118       __ Dlsa(store_base, elements, key, kPointerSizeLog2);
   4119     }
   4120   }
   4121 
   4122   Representation representation = instr->hydrogen()->value()->representation();
   4123   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
   4124     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4125     DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
   4126     if (FLAG_debug_code) {
   4127       Register temp = scratch1();
   4128       __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
   4129       __ AssertSmi(temp);
   4130     }
   4131 
   4132     // Store int value directly to upper half of the smi.
   4133     STATIC_ASSERT(kSmiTag == 0);
   4134     STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
   4135     offset = SmiWordOffset(offset);
   4136     representation = Representation::Integer32();
   4137   }
   4138 
   4139   __ Store(value, MemOperand(store_base, offset), representation);
   4140 
   4141   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4142     SmiCheck check_needed =
   4143         instr->hydrogen()->value()->type().IsHeapObject()
   4144             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4145     // Compute address of modified element and store it into key register.
   4146     __ Daddu(key, store_base, Operand(offset));
   4147     __ RecordWrite(elements,
   4148                    key,
   4149                    value,
   4150                    GetRAState(),
   4151                    kSaveFPRegs,
   4152                    EMIT_REMEMBERED_SET,
   4153                    check_needed,
   4154                    instr->hydrogen()->PointersToHereCheckForValue());
   4155   }
   4156 }
   4157 
   4158 
   4159 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
    4160   // Dispatch by elements kind: external, fast double, or fast object/smi.
   4161   if (instr->is_fixed_typed_array()) {
   4162     DoStoreKeyedExternalArray(instr);
   4163   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4164     DoStoreKeyedFixedDoubleArray(instr);
   4165   } else {
   4166     DoStoreKeyedFixedArray(instr);
   4167   }
   4168 }
   4169 
   4170 
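         // Deferred code blocks (used here and below) keep the slow path out of
         // line: the main instruction stream contains only the capacity check and
         // jumps to deferred->entry() when the backing store must be grown.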
   4171 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4172   class DeferredMaybeGrowElements final : public LDeferredCode {
   4173    public:
   4174     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4175         : LDeferredCode(codegen), instr_(instr) {}
   4176     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4177     LInstruction* instr() override { return instr_; }
   4178 
   4179    private:
   4180     LMaybeGrowElements* instr_;
   4181   };
   4182 
   4183   Register result = v0;
   4184   DeferredMaybeGrowElements* deferred =
   4185       new (zone()) DeferredMaybeGrowElements(this, instr);
   4186   LOperand* key = instr->key();
   4187   LOperand* current_capacity = instr->current_capacity();
   4188 
   4189   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4190   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4191   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4192   DCHECK(current_capacity->IsConstantOperand() ||
   4193          current_capacity->IsRegister());
   4194 
   4195   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4196     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4197     int32_t constant_capacity =
   4198         ToInteger32(LConstantOperand::cast(current_capacity));
   4199     if (constant_key >= constant_capacity) {
   4200       // Deferred case.
   4201       __ jmp(deferred->entry());
   4202     }
   4203   } else if (key->IsConstantOperand()) {
   4204     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4205     __ Branch(deferred->entry(), le, ToRegister(current_capacity),
   4206               Operand(constant_key));
   4207   } else if (current_capacity->IsConstantOperand()) {
   4208     int32_t constant_capacity =
   4209         ToInteger32(LConstantOperand::cast(current_capacity));
   4210     __ Branch(deferred->entry(), ge, ToRegister(key),
   4211               Operand(constant_capacity));
   4212   } else {
   4213     __ Branch(deferred->entry(), ge, ToRegister(key),
   4214               Operand(ToRegister(current_capacity)));
   4215   }
   4216 
   4217   if (instr->elements()->IsRegister()) {
   4218     __ mov(result, ToRegister(instr->elements()));
   4219   } else {
   4220     __ ld(result, ToMemOperand(instr->elements()));
   4221   }
   4222 
   4223   __ bind(deferred->exit());
   4224 }
   4225 
   4226 
   4227 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4228   // TODO(3095996): Get rid of this. For now, we need to make the
   4229   // result register contain a valid pointer because it is already
   4230   // contained in the register pointer map.
   4231   Register result = v0;
   4232   __ mov(result, zero_reg);
   4233 
   4234   // We have to call a stub.
   4235   {
   4236     PushSafepointRegistersScope scope(this);
   4237     if (instr->object()->IsRegister()) {
   4238       __ mov(result, ToRegister(instr->object()));
   4239     } else {
   4240       __ ld(result, ToMemOperand(instr->object()));
   4241     }
   4242 
   4243     LOperand* key = instr->key();
   4244     if (key->IsConstantOperand()) {
   4245       __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
   4246     } else {
   4247       __ mov(a3, ToRegister(key));
   4248       __ SmiTag(a3);
   4249     }
   4250 
   4251     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
   4252     __ mov(a0, result);
   4253     __ CallStub(&stub);
   4254     RecordSafepointWithLazyDeopt(
   4255         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4256     __ StoreToSafepointRegisterSlot(result, result);
   4257   }
   4258 
   4259   // Deopt on smi, which means the elements array changed to dictionary mode.
   4260   __ SmiTst(result, at);
   4261   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
   4262 }
   4263 
   4264 
   4265 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4266   Register object_reg = ToRegister(instr->object());
   4267   Register scratch = scratch0();
   4268 
   4269   Handle<Map> from_map = instr->original_map();
   4270   Handle<Map> to_map = instr->transitioned_map();
   4271   ElementsKind from_kind = instr->from_kind();
   4272   ElementsKind to_kind = instr->to_kind();
   4273 
   4274   Label not_applicable;
   4275   __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4276   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
   4277 
   4278   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4279     Register new_map_reg = ToRegister(instr->new_map_temp());
   4280     __ li(new_map_reg, Operand(to_map));
   4281     __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4282     // Write barrier.
   4283     __ RecordWriteForMap(object_reg,
   4284                          new_map_reg,
   4285                          scratch,
   4286                          GetRAState(),
   4287                          kDontSaveFPRegs);
   4288   } else {
   4289     DCHECK(object_reg.is(a0));
   4290     DCHECK(ToRegister(instr->context()).is(cp));
   4291     PushSafepointRegistersScope scope(this);
   4292     __ li(a1, Operand(to_map));
   4293     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
   4294     __ CallStub(&stub);
   4295     RecordSafepointWithRegisters(
   4296         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   4297   }
   4298   __ bind(&not_applicable);
   4299 }
   4300 
   4301 
   4302 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4303   Register object = ToRegister(instr->object());
   4304   Register temp = ToRegister(instr->temp());
   4305   Label no_memento_found;
   4306   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   4307   DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound);
   4308   __ bind(&no_memento_found);
   4309 }
   4310 
   4311 
   4312 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4313   DCHECK(ToRegister(instr->context()).is(cp));
   4314   DCHECK(ToRegister(instr->left()).is(a1));
   4315   DCHECK(ToRegister(instr->right()).is(a0));
   4316   StringAddStub stub(isolate(),
   4317                      instr->hydrogen()->flags(),
   4318                      instr->hydrogen()->pretenure_flag());
   4319   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4320 }
   4321 
   4322 
   4323 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4324   class DeferredStringCharCodeAt final : public LDeferredCode {
   4325    public:
   4326     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4327         : LDeferredCode(codegen), instr_(instr) { }
   4328     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4329     LInstruction* instr() override { return instr_; }
   4330 
   4331    private:
   4332     LStringCharCodeAt* instr_;
   4333   };
   4334 
   4335   DeferredStringCharCodeAt* deferred =
   4336       new(zone()) DeferredStringCharCodeAt(this, instr);
   4337   StringCharLoadGenerator::Generate(masm(),
   4338                                     ToRegister(instr->string()),
   4339                                     ToRegister(instr->index()),
   4340                                     ToRegister(instr->result()),
   4341                                     deferred->entry());
   4342   __ bind(deferred->exit());
   4343 }
   4344 
   4345 
   4346 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4347   Register string = ToRegister(instr->string());
   4348   Register result = ToRegister(instr->result());
   4349   Register scratch = scratch0();
   4350 
   4351   // TODO(3095996): Get rid of this. For now, we need to make the
   4352   // result register contain a valid pointer because it is already
   4353   // contained in the register pointer map.
   4354   __ mov(result, zero_reg);
   4355 
   4356   PushSafepointRegistersScope scope(this);
   4357   __ push(string);
   4358   // Push the index as a smi. This is safe because of the checks in
   4359   // DoStringCharCodeAt above.
   4360   if (instr->index()->IsConstantOperand()) {
   4361     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4362     __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
   4363     __ push(scratch);
   4364   } else {
   4365     Register index = ToRegister(instr->index());
   4366     __ SmiTag(index);
   4367     __ push(index);
   4368   }
   4369   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   4370                           instr->context());
   4371   __ AssertSmi(v0);
   4372   __ SmiUntag(v0);
   4373   __ StoreToSafepointRegisterSlot(v0, result);
   4374 }
   4375 
   4376 
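         // Fast path: one-byte char codes are looked up in the single character
         // string cache. An undefined cache slot means the string has not been
         // materialized yet, so the deferred runtime call has to create it.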
   4377 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4378   class DeferredStringCharFromCode final : public LDeferredCode {
   4379    public:
   4380     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4381         : LDeferredCode(codegen), instr_(instr) { }
   4382     void Generate() override {
   4383       codegen()->DoDeferredStringCharFromCode(instr_);
   4384     }
   4385     LInstruction* instr() override { return instr_; }
   4386 
   4387    private:
   4388     LStringCharFromCode* instr_;
   4389   };
   4390 
   4391   DeferredStringCharFromCode* deferred =
   4392       new(zone()) DeferredStringCharFromCode(this, instr);
   4393 
   4394   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4395   Register char_code = ToRegister(instr->char_code());
   4396   Register result = ToRegister(instr->result());
   4397   Register scratch = scratch0();
   4398   DCHECK(!char_code.is(result));
   4399 
   4400   __ Branch(deferred->entry(), hi,
   4401             char_code, Operand(String::kMaxOneByteCharCode));
   4402   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4403   __ Dlsa(result, result, char_code, kPointerSizeLog2);
   4404   __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4405   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   4406   __ Branch(deferred->entry(), eq, result, Operand(scratch));
   4407   __ bind(deferred->exit());
   4408 }
   4409 
   4410 
   4411 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4412   Register char_code = ToRegister(instr->char_code());
   4413   Register result = ToRegister(instr->result());
   4414 
   4415   // TODO(3095996): Get rid of this. For now, we need to make the
   4416   // result register contain a valid pointer because it is already
   4417   // contained in the register pointer map.
   4418   __ mov(result, zero_reg);
   4419 
   4420   PushSafepointRegistersScope scope(this);
   4421   __ SmiTag(char_code);
   4422   __ push(char_code);
   4423   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   4424                           instr->context());
   4425   __ StoreToSafepointRegisterSlot(v0, result);
   4426 }
   4427 
   4428 
   4429 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4430   LOperand* input = instr->value();
   4431   DCHECK(input->IsRegister() || input->IsStackSlot());
   4432   LOperand* output = instr->result();
   4433   DCHECK(output->IsDoubleRegister());
   4434   FPURegister single_scratch = double_scratch0().low();
   4435   if (input->IsStackSlot()) {
   4436     Register scratch = scratch0();
   4437     __ ld(scratch, ToMemOperand(input));
   4438     __ mtc1(scratch, single_scratch);
   4439   } else {
   4440     __ mtc1(ToRegister(input), single_scratch);
   4441   }
   4442   __ cvt_d_w(ToDoubleRegister(output), single_scratch);
   4443 }
   4444 
   4445 
   4446 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4447   LOperand* input = instr->value();
   4448   LOperand* output = instr->result();
   4449 
   4450   FPURegister dbl_scratch = double_scratch0();
   4451   __ mtc1(ToRegister(input), dbl_scratch);
   4452   __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
   4453 }
   4454 
   4455 
   4456 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4457   class DeferredNumberTagU final : public LDeferredCode {
   4458    public:
   4459     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4460         : LDeferredCode(codegen), instr_(instr) { }
   4461     void Generate() override {
   4462       codegen()->DoDeferredNumberTagIU(instr_,
   4463                                        instr_->value(),
   4464                                        instr_->temp1(),
   4465                                        instr_->temp2(),
   4466                                        UNSIGNED_INT32);
   4467     }
   4468     LInstruction* instr() override { return instr_; }
   4469 
   4470    private:
   4471     LNumberTagU* instr_;
   4472   };
   4473 
   4474   Register input = ToRegister(instr->value());
   4475   Register result = ToRegister(instr->result());
   4476 
   4477   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4478   __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
   4479   __ SmiTag(result, input);
   4480   __ bind(deferred->exit());
   4481 }
   4482 
   4483 
   4484 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
   4485                                      LOperand* value,
   4486                                      LOperand* temp1,
   4487                                      LOperand* temp2,
   4488                                      IntegerSignedness signedness) {
   4489   Label done, slow;
   4490   Register src = ToRegister(value);
   4491   Register dst = ToRegister(instr->result());
   4492   Register tmp1 = scratch0();
   4493   Register tmp2 = ToRegister(temp1);
   4494   Register tmp3 = ToRegister(temp2);
   4495   DoubleRegister dbl_scratch = double_scratch0();
   4496 
   4497   if (signedness == SIGNED_INT32) {
   4498     // There was overflow, so bits 30 and 31 of the original integer
   4499     // disagree. Try to allocate a heap number in new space and store
   4500     // the value in there. If that fails, call the runtime system.
   4501     if (dst.is(src)) {
   4502       __ SmiUntag(src, dst);
   4503       __ Xor(src, src, Operand(0x80000000));
   4504     }
   4505     __ mtc1(src, dbl_scratch);
   4506     __ cvt_d_w(dbl_scratch, dbl_scratch);
   4507   } else {
   4508     __ mtc1(src, dbl_scratch);
   4509     __ Cvt_d_uw(dbl_scratch, dbl_scratch);
   4510   }
   4511 
   4512   if (FLAG_inline_new) {
   4513     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4514     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
   4515     __ Branch(&done);
   4516   }
   4517 
   4518   // Slow case: Call the runtime system to do the number allocation.
   4519   __ bind(&slow);
   4520   {
   4521     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4522     // result register is stored, as this register is in the pointer map, but
   4523     // contains an integer value.
   4524     __ mov(dst, zero_reg);
   4525     // Preserve the value of all registers.
   4526     PushSafepointRegistersScope scope(this);
   4527     // Reset the context register.
   4528     if (!dst.is(cp)) {
   4529       __ mov(cp, zero_reg);
   4530     }
   4531     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4532     RecordSafepointWithRegisters(
   4533         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4534     __ StoreToSafepointRegisterSlot(v0, dst);
   4535   }
   4536 
    4537   // Done. Store the value in dbl_scratch into the value field of the
    4538   // allocated heap number.
   4539   __ bind(&done);
   4540   __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
   4541 }
   4542 
   4543 
   4544 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4545   class DeferredNumberTagD final : public LDeferredCode {
   4546    public:
   4547     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4548         : LDeferredCode(codegen), instr_(instr) { }
   4549     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
   4550     LInstruction* instr() override { return instr_; }
   4551 
   4552    private:
   4553     LNumberTagD* instr_;
   4554   };
   4555 
   4556   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4557   Register scratch = scratch0();
   4558   Register reg = ToRegister(instr->result());
   4559   Register temp1 = ToRegister(instr->temp());
   4560   Register temp2 = ToRegister(instr->temp2());
   4561 
   4562   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4563   if (FLAG_inline_new) {
   4564     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    4565     // We want the untagged address first for performance.
   4566     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   4567   } else {
   4568     __ Branch(deferred->entry());
   4569   }
   4570   __ bind(deferred->exit());
   4571   __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
   4572 }
   4573 
   4574 
   4575 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4576   // TODO(3095996): Get rid of this. For now, we need to make the
   4577   // result register contain a valid pointer because it is already
   4578   // contained in the register pointer map.
   4579   Register reg = ToRegister(instr->result());
   4580   __ mov(reg, zero_reg);
   4581 
   4582   PushSafepointRegistersScope scope(this);
   4583   // Reset the context register.
   4584   if (!reg.is(cp)) {
   4585     __ mov(cp, zero_reg);
   4586   }
   4587   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4588   RecordSafepointWithRegisters(
   4589       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4590   __ StoreToSafepointRegisterSlot(v0, reg);
   4591 }
   4592 
   4593 
   4594 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4595   HChange* hchange = instr->hydrogen();
   4596   Register input = ToRegister(instr->value());
   4597   Register output = ToRegister(instr->result());
   4598   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4599       hchange->value()->CheckFlag(HValue::kUint32)) {
   4600     __ And(at, input, Operand(0x80000000));
   4601     DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
   4602   }
   4603   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4604       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4605     __ SmiTagCheckOverflow(output, input, at);
   4606     DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
   4607   } else {
   4608     __ SmiTag(output, input);
   4609   }
   4610 }
   4611 
   4612 
   4613 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4614   Register scratch = scratch0();
   4615   Register input = ToRegister(instr->value());
   4616   Register result = ToRegister(instr->result());
   4617   if (instr->needs_check()) {
   4618     STATIC_ASSERT(kHeapObjectTag == 1);
    4619     // If the input is a HeapObject, the value of scratch won't be zero.
   4620     __ And(scratch, input, Operand(kHeapObjectTag));
   4621     __ SmiUntag(result, input);
   4622     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
   4623                  Operand(zero_reg));
   4624   } else {
   4625     __ SmiUntag(result, input);
   4626   }
   4627 }
   4628 
   4629 
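         // The minus-zero check below relies on the IEEE-754 bit pattern of
         // -0.0, which is 0x8000000000000000: the lower word (read by mfc1) is
         // zero and the upper word (read by mfhc1) equals HeapNumber::kSignMask.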
   4630 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4631                                 DoubleRegister result_reg,
   4632                                 NumberUntagDMode mode) {
   4633   bool can_convert_undefined_to_nan = instr->truncating();
   4634   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4635 
   4636   Register scratch = scratch0();
   4637   Label convert, load_smi, done;
   4638   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4639     // Smi check.
   4640     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4641     // Heap number map check.
   4642     __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4643     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   4644     if (can_convert_undefined_to_nan) {
   4645       __ Branch(&convert, ne, scratch, Operand(at));
   4646     } else {
   4647       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
   4648                    Operand(at));
   4649     }
   4650     // Load heap number.
   4651     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4652     if (deoptimize_on_minus_zero) {
   4653       __ mfc1(at, result_reg);
   4654       __ Branch(&done, ne, at, Operand(zero_reg));
   4655       __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
   4656       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
   4657                    Operand(HeapNumber::kSignMask));
   4658     }
   4659     __ Branch(&done);
   4660     if (can_convert_undefined_to_nan) {
   4661       __ bind(&convert);
   4662       // Convert undefined (and hole) to NaN.
   4663       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4664       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
   4665                    input_reg, Operand(at));
   4666       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4667       __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4668       __ Branch(&done);
   4669     }
   4670   } else {
   4671     __ SmiUntag(scratch, input_reg);
   4672     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4673   }
    4674   // Smi to double register conversion.
   4675   __ bind(&load_smi);
   4676   // scratch: untagged value of input_reg
   4677   __ mtc1(scratch, result_reg);
   4678   __ cvt_d_w(result_reg, result_reg);
   4679   __ bind(&done);
   4680 }
   4681 
   4682 
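         // Truncating conversions (e.g. the implicit ToInt32 in "x | 0") accept
         // heap numbers and oddballs; an oddball's cached to-number value is
         // presumably stored at the same offset as a heap number's value field,
         // which is what lets TruncateHeapNumberToI handle both. Non-truncating
         // conversions deoptimize unless the input is a heap number holding an
         // exact int32.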
   4683 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4684   Register input_reg = ToRegister(instr->value());
   4685   Register scratch1 = scratch0();
   4686   Register scratch2 = ToRegister(instr->temp());
   4687   DoubleRegister double_scratch = double_scratch0();
   4688   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4689 
   4690   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4691   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4692 
   4693   Label done;
   4694 
   4695   // The input is a tagged HeapObject.
   4696   // Heap number map check.
   4697   __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4698   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    4699   // The heap number map in 'at' and the object's map in scratch1 are used
    4700   // by the comparisons in both branches of the if below.
   4701 
   4702   if (instr->truncating()) {
   4703     Label truncate;
   4704     __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
   4705     __ mov(scratch2, input_reg);  // In delay slot.
   4706     __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   4707     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
   4708                  Operand(ODDBALL_TYPE));
   4709     __ bind(&truncate);
   4710     __ TruncateHeapNumberToI(input_reg, scratch2);
   4711   } else {
   4712     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
   4713                  Operand(at));
   4714 
   4715     // Load the double value.
   4716     __ ldc1(double_scratch,
   4717             FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4718 
   4719     Register except_flag = scratch2;
   4720     __ EmitFPUTruncate(kRoundToZero,
   4721                        input_reg,
   4722                        double_scratch,
   4723                        scratch1,
   4724                        double_scratch2,
   4725                        except_flag,
   4726                        kCheckForInexactConversion);
   4727 
   4728     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
   4729                  Operand(zero_reg));
   4730 
   4731     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4732       __ Branch(&done, ne, input_reg, Operand(zero_reg));
   4733 
   4734       __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
   4735       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4736       DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
   4737                    Operand(zero_reg));
   4738     }
   4739   }
   4740   __ bind(&done);
   4741 }
   4742 
   4743 
   4744 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4745   class DeferredTaggedToI final : public LDeferredCode {
   4746    public:
   4747     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4748         : LDeferredCode(codegen), instr_(instr) { }
   4749     void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
   4750     LInstruction* instr() override { return instr_; }
   4751 
   4752    private:
   4753     LTaggedToI* instr_;
   4754   };
   4755 
   4756   LOperand* input = instr->value();
   4757   DCHECK(input->IsRegister());
   4758   DCHECK(input->Equals(instr->result()));
   4759 
   4760   Register input_reg = ToRegister(input);
   4761 
   4762   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4763     __ SmiUntag(input_reg);
   4764   } else {
   4765     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4766 
   4767     // Let the deferred code handle the HeapObject case.
   4768     __ JumpIfNotSmi(input_reg, deferred->entry());
   4769 
   4770     // Smi to int32 conversion.
   4771     __ SmiUntag(input_reg);
   4772     __ bind(deferred->exit());
   4773   }
   4774 }
   4775 
   4776 
   4777 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4778   LOperand* input = instr->value();
   4779   DCHECK(input->IsRegister());
   4780   LOperand* result = instr->result();
   4781   DCHECK(result->IsDoubleRegister());
   4782 
   4783   Register input_reg = ToRegister(input);
   4784   DoubleRegister result_reg = ToDoubleRegister(result);
   4785 
   4786   HValue* value = instr->hydrogen()->value();
   4787   NumberUntagDMode mode = value->representation().IsSmi()
   4788       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4789 
   4790   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   4791 }
   4792 
   4793 
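         // For non-truncating conversions, EmitFPUTruncate sets except_flag when
         // the FPU reports an inexact, invalid or overflowing conversion. For
         // example, converting 1.5 raises the inexact flag, so the code below
         // deoptimizes rather than silently rounding; -0.0 converts exactly to 0
         // and is caught separately via the sign bit.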
   4794 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4795   Register result_reg = ToRegister(instr->result());
   4796   Register scratch1 = scratch0();
   4797   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4798 
   4799   if (instr->truncating()) {
   4800     __ TruncateDoubleToI(result_reg, double_input);
   4801   } else {
   4802     Register except_flag = LCodeGen::scratch1();
   4803 
   4804     __ EmitFPUTruncate(kRoundToMinusInf,
   4805                        result_reg,
   4806                        double_input,
   4807                        scratch1,
   4808                        double_scratch0(),
   4809                        except_flag,
   4810                        kCheckForInexactConversion);
   4811 
   4812     // Deopt if the operation did not succeed (except_flag != 0).
   4813     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
   4814                  Operand(zero_reg));
   4815 
   4816     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4817       Label done;
   4818       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   4819       __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
   4820       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4821       DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
   4822                    Operand(zero_reg));
   4823       __ bind(&done);
   4824     }
   4825   }
   4826 }
   4827 
   4828 
   4829 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4830   Register result_reg = ToRegister(instr->result());
   4831   Register scratch1 = LCodeGen::scratch0();
   4832   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4833 
   4834   if (instr->truncating()) {
   4835     __ TruncateDoubleToI(result_reg, double_input);
   4836   } else {
   4837     Register except_flag = LCodeGen::scratch1();
   4838 
   4839     __ EmitFPUTruncate(kRoundToMinusInf,
   4840                        result_reg,
   4841                        double_input,
   4842                        scratch1,
   4843                        double_scratch0(),
   4844                        except_flag,
   4845                        kCheckForInexactConversion);
   4846 
   4847     // Deopt if the operation did not succeed (except_flag != 0).
   4848     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
   4849                  Operand(zero_reg));
   4850 
   4851     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4852       Label done;
   4853       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   4854       __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
   4855       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4856       DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
   4857                    Operand(zero_reg));
   4858       __ bind(&done);
   4859     }
   4860   }
   4861   __ SmiTag(result_reg, result_reg);
   4862 }
   4863 
   4864 
   4865 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   4866   LOperand* input = instr->value();
   4867   __ SmiTst(ToRegister(input), at);
   4868   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
   4869 }
   4870 
   4871 
   4872 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   4873   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4874     LOperand* input = instr->value();
   4875     __ SmiTst(ToRegister(input), at);
   4876     DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
   4877   }
   4878 }
   4879 
   4880 
   4881 void LCodeGen::DoCheckArrayBufferNotNeutered(
   4882     LCheckArrayBufferNotNeutered* instr) {
   4883   Register view = ToRegister(instr->view());
   4884   Register scratch = scratch0();
   4885 
   4886   __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   4887   __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   4888   __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
   4889   DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
   4890                Operand(zero_reg));
   4891 }
   4892 
   4893 
   4894 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   4895   Register input = ToRegister(instr->value());
   4896   Register scratch = scratch0();
   4897 
   4898   __ GetObjectType(input, scratch, scratch);
   4899 
   4900   if (instr->hydrogen()->is_interval_check()) {
   4901     InstanceType first;
   4902     InstanceType last;
   4903     instr->hydrogen()->GetCheckInterval(&first, &last);
   4904 
    4905     // If there is only one type in the interval, check for equality.
   4906     if (first == last) {
   4907       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
   4908                    Operand(first));
   4909     } else {
   4910       DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
   4911                    Operand(first));
   4912       // Omit check for the last type.
   4913       if (last != LAST_TYPE) {
   4914         DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
   4915                      Operand(last));
   4916       }
   4917     }
   4918   } else {
   4919     uint8_t mask;
   4920     uint8_t tag;
   4921     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   4922 
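             // When the mask is a single bit, the check reduces to a bit test:
             // with tag == 0 the instance passes if the bit is clear (deopt on
             // ne); otherwise the bit must be set (deopt on eq).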
   4923     if (base::bits::IsPowerOfTwo32(mask)) {
   4924       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
   4925       __ And(at, scratch, mask);
   4926       DeoptimizeIf(tag == 0 ? ne : eq, instr,
   4927                    DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
   4928     } else {
   4929       __ And(scratch, scratch, Operand(mask));
   4930       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
   4931                    Operand(tag));
   4932     }
   4933   }
   4934 }
   4935 
   4936 
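         // Objects in new space can move during a scavenge, so a new-space value
         // cannot be embedded into the code directly; it is compared through a
         // cell whose contents the GC keeps up to date.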
   4937 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   4938   Register reg = ToRegister(instr->value());
   4939   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   4940   AllowDeferredHandleDereference smi_check;
   4941   if (isolate()->heap()->InNewSpace(*object)) {
   4943     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   4944     __ li(at, Operand(cell));
   4945     __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
   4946     DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
   4947   } else {
   4948     DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
   4949                  Operand(object));
   4950   }
   4951 }
   4952 
   4953 
   4954 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   4955   Label deopt, done;
    4956   // If the map is not deprecated, the migration attempt does not make sense.
   4957   __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
   4958   __ lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
   4959   __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
   4960   __ Branch(&deopt, eq, at, Operand(zero_reg));
   4961 
   4962   {
   4963     PushSafepointRegistersScope scope(this);
   4964     __ push(object);
   4965     __ mov(cp, zero_reg);
   4966     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   4967     RecordSafepointWithRegisters(
   4968         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   4969     __ StoreToSafepointRegisterSlot(v0, scratch0());
   4970   }
   4971   __ SmiTst(scratch0(), at);
   4972   __ Branch(&done, ne, at, Operand(zero_reg));
   4973 
   4974   __ bind(&deopt);
    4975   // For the "al" (always) condition the operands are not used, so just pass
    4976   // zero_reg there.
   4977   DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
   4978                Operand(zero_reg));
   4979 
   4980   __ bind(&done);
   4981 }
   4982 
   4983 
   4984 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   4985   class DeferredCheckMaps final : public LDeferredCode {
   4986    public:
   4987     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   4988         : LDeferredCode(codegen), instr_(instr), object_(object) {
   4989       SetExit(check_maps());
   4990     }
   4991     void Generate() override {
   4992       codegen()->DoDeferredInstanceMigration(instr_, object_);
   4993     }
   4994     Label* check_maps() { return &check_maps_; }
   4995     LInstruction* instr() override { return instr_; }
   4996 
   4997    private:
   4998     LCheckMaps* instr_;
   4999     Label check_maps_;
   5000     Register object_;
   5001   };
   5002 
   5003   if (instr->hydrogen()->IsStabilityCheck()) {
   5004     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5005     for (int i = 0; i < maps->size(); ++i) {
   5006       AddStabilityDependency(maps->at(i).handle());
   5007     }
   5008     return;
   5009   }
   5010 
   5011   Register map_reg = scratch0();
   5012   LOperand* input = instr->value();
   5013   DCHECK(input->IsRegister());
   5014   Register reg = ToRegister(input);
   5015   __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   5016 
   5017   DeferredCheckMaps* deferred = NULL;
   5018   if (instr->hydrogen()->HasMigrationTarget()) {
   5019     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5020     __ bind(deferred->check_maps());
   5021   }
   5022 
   5023   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5024   Label success;
   5025   for (int i = 0; i < maps->size() - 1; i++) {
   5026     Handle<Map> map = maps->at(i).handle();
   5027     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
   5028   }
   5029   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5030   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
   5031   if (instr->hydrogen()->HasMigrationTarget()) {
   5032     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
   5033   } else {
   5034     DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
   5035   }
   5036 
   5037   __ bind(&success);
   5038 }
   5039 
   5040 
   5041 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5042   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5043   Register result_reg = ToRegister(instr->result());
   5044   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5045   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
   5046 }
   5047 
   5048 
   5049 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5050   Register unclamped_reg = ToRegister(instr->unclamped());
   5051   Register result_reg = ToRegister(instr->result());
   5052   __ ClampUint8(result_reg, unclamped_reg);
   5053 }
   5054 
   5055 
   5056 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5057   Register scratch = scratch0();
   5058   Register input_reg = ToRegister(instr->unclamped());
   5059   Register result_reg = ToRegister(instr->result());
   5060   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5061   Label is_smi, done, heap_number;
   5062 
   5063   // Both smi and heap number cases are handled.
   5064   __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
   5065 
    5066   // Check for a heap number.
   5067   __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5068   __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
   5069 
   5070   // Check for undefined. Undefined is converted to zero for clamping
   5071   // conversions.
   5072   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
   5073                Operand(factory()->undefined_value()));
   5074   __ mov(result_reg, zero_reg);
   5075   __ jmp(&done);
   5076 
    5077   // Heap number case.
   5078   __ bind(&heap_number);
   5079   __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
   5080                                              HeapNumber::kValueOffset));
   5081   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
   5082   __ jmp(&done);
   5083 
   5084   __ bind(&is_smi);
   5085   __ ClampUint8(result_reg, scratch);
   5086 
   5087   __ bind(&done);
   5088 }
   5089 
   5090 
   5091 void LCodeGen::DoAllocate(LAllocate* instr) {
   5092   class DeferredAllocate final : public LDeferredCode {
   5093    public:
   5094     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5095         : LDeferredCode(codegen), instr_(instr) { }
   5096     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
   5097     LInstruction* instr() override { return instr_; }
   5098 
   5099    private:
   5100     LAllocate* instr_;
   5101   };
   5102 
   5103   DeferredAllocate* deferred =
   5104       new(zone()) DeferredAllocate(this, instr);
   5105 
   5106   Register result = ToRegister(instr->result());
   5107   Register scratch = ToRegister(instr->temp1());
   5108   Register scratch2 = ToRegister(instr->temp2());
   5109 
   5110   // Allocate memory for the object.
   5111   AllocationFlags flags = NO_ALLOCATION_FLAGS;
   5112   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5113     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5114   }
   5115   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5116     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5117     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5118   }
   5119 
   5120   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5121     flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
   5122   }
   5123   DCHECK(!instr->hydrogen()->IsAllocationFolded());
   5124 
   5125   if (instr->size()->IsConstantOperand()) {
   5126     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5127     CHECK(size <= kMaxRegularHeapObjectSize);
   5128     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5129   } else {
   5130     Register size = ToRegister(instr->size());
   5131     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5132   }
   5133 
   5134   __ bind(deferred->exit());
   5135 
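           // The heap must stay iterable while the new object is still
           // uninitialized, so pre-fill its body with one-pointer filler maps;
           // the loop below walks from the end of the object down to its start.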
   5136   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5137     STATIC_ASSERT(kHeapObjectTag == 1);
   5138     if (instr->size()->IsConstantOperand()) {
   5139       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5140       __ li(scratch, Operand(size - kHeapObjectTag));
   5141     } else {
   5142       __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
   5143     }
   5144     __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
   5145     Label loop;
   5146     __ bind(&loop);
   5147     __ Dsubu(scratch, scratch, Operand(kPointerSize));
   5148     __ Daddu(at, result, Operand(scratch));
   5149     __ sd(scratch2, MemOperand(at));
   5150     __ Branch(&loop, ge, scratch, Operand(zero_reg));
   5151   }
   5152 }
   5153 
   5154 
   5155 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5156   Register result = ToRegister(instr->result());
   5157 
   5158   // TODO(3095996): Get rid of this. For now, we need to make the
   5159   // result register contain a valid pointer because it is already
   5160   // contained in the register pointer map.
   5161   __ mov(result, zero_reg);
   5162 
   5163   PushSafepointRegistersScope scope(this);
   5164   if (instr->size()->IsRegister()) {
   5165     Register size = ToRegister(instr->size());
   5166     DCHECK(!size.is(result));
   5167     __ SmiTag(size);
   5168     __ push(size);
   5169   } else {
   5170     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5171     if (size >= 0 && size <= Smi::kMaxValue) {
   5172       __ li(v0, Operand(Smi::FromInt(size)));
   5173       __ Push(v0);
   5174     } else {
    5175       // We should never get here at runtime => abort.
   5176       __ stop("invalid allocation size");
   5177       return;
   5178     }
   5179   }
   5180 
   5181   int flags = AllocateDoubleAlignFlag::encode(
   5182       instr->hydrogen()->MustAllocateDoubleAligned());
   5183   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5184     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5185     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   5186   } else {
   5187     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5188   }
   5189   __ li(v0, Operand(Smi::FromInt(flags)));
   5190   __ Push(v0);
   5191 
   5192   CallRuntimeFromDeferred(
   5193       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   5194   __ StoreToSafepointRegisterSlot(v0, result);
   5195 
   5196   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5197     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
   5198     if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5199       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5200       allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5201     }
    5202     // If the allocation-folding dominator's allocation triggered a GC, the
    5203     // allocation happened in the runtime. We have to reset the top pointer
    5204     // to virtually undo the allocation.
   5205     ExternalReference allocation_top =
   5206         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
   5207     Register top_address = scratch0();
   5208     __ Dsubu(v0, v0, Operand(kHeapObjectTag));
   5209     __ li(top_address, Operand(allocation_top));
   5210     __ sd(v0, MemOperand(top_address));
   5211     __ Daddu(v0, v0, Operand(kHeapObjectTag));
   5212   }
   5213 }
   5214 
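         // Allocations folded into a dominator perform no limit check of their
         // own: the dominator already reserved space for the whole folded group,
         // so FastAllocate only bumps the top pointer within that reservation.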
   5215 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
   5216   DCHECK(instr->hydrogen()->IsAllocationFolded());
   5217   DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
   5218   Register result = ToRegister(instr->result());
   5219   Register scratch1 = ToRegister(instr->temp1());
   5220   Register scratch2 = ToRegister(instr->temp2());
   5221 
   5222   AllocationFlags flags = ALLOCATION_FOLDED;
   5223   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5224     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5225   }
   5226   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5227     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5228     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5229   }
   5230   if (instr->size()->IsConstantOperand()) {
   5231     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5232     CHECK(size <= kMaxRegularHeapObjectSize);
   5233     __ FastAllocate(size, result, scratch1, scratch2, flags);
   5234   } else {
   5235     Register size = ToRegister(instr->size());
   5236     __ FastAllocate(size, result, scratch1, scratch2, flags);
   5237   }
   5238 }
   5239 
   5240 
   5241 void LCodeGen::DoTypeof(LTypeof* instr) {
   5242   DCHECK(ToRegister(instr->value()).is(a3));
   5243   DCHECK(ToRegister(instr->result()).is(v0));
   5244   Label end, do_call;
   5245   Register value_register = ToRegister(instr->value());
   5246   __ JumpIfNotSmi(value_register, &do_call);
   5247   __ li(v0, Operand(isolate()->factory()->number_string()));
   5248   __ jmp(&end);
   5249   __ bind(&do_call);
   5250   Callable callable = CodeFactory::Typeof(isolate());
   5251   CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
   5252   __ bind(&end);
   5253 }
   5254 
   5255 
   5256 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   5257   Register input = ToRegister(instr->value());
   5258 
   5259   Register cmp1 = no_reg;
   5260   Operand cmp2 = Operand(no_reg);
   5261 
   5262   Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
   5263                                                   instr->FalseLabel(chunk_),
   5264                                                   input,
   5265                                                   instr->type_literal(),
   5266                                                   &cmp1,
   5267                                                   &cmp2);
   5268 
   5269   DCHECK(cmp1.is_valid());
   5270   DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
   5271 
   5272   if (final_branch_condition != kNoCondition) {
   5273     EmitBranch(instr, final_branch_condition, cmp1, cmp2);
   5274   }
   5275 }
   5276 
   5277 
   5278 Condition LCodeGen::EmitTypeofIs(Label* true_label,
   5279                                  Label* false_label,
   5280                                  Register input,
   5281                                  Handle<String> type_name,
   5282                                  Register* cmp1,
   5283                                  Operand* cmp2) {
    5284   // This function makes heavy use of branch delay slots, using them to load
    5285   // values that are safe to compute regardless of the type of the input
    5286   // register.
   5287   Condition final_branch_condition = kNoCondition;
   5288   Register scratch = scratch0();
   5289   Factory* factory = isolate()->factory();
   5290   if (String::Equals(type_name, factory->number_string())) {
   5291     __ JumpIfSmi(input, true_label);
   5292     __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
   5293     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   5294     *cmp1 = input;
   5295     *cmp2 = Operand(at);
   5296     final_branch_condition = eq;
   5297 
   5298   } else if (String::Equals(type_name, factory->string_string())) {
   5299     __ JumpIfSmi(input, false_label);
   5300     __ GetObjectType(input, input, scratch);
   5301     *cmp1 = scratch;
   5302     *cmp2 = Operand(FIRST_NONSTRING_TYPE);
   5303     final_branch_condition = lt;
   5304 
   5305   } else if (String::Equals(type_name, factory->symbol_string())) {
   5306     __ JumpIfSmi(input, false_label);
   5307     __ GetObjectType(input, input, scratch);
   5308     *cmp1 = scratch;
   5309     *cmp2 = Operand(SYMBOL_TYPE);
   5310     final_branch_condition = eq;
   5311 
   5312   } else if (String::Equals(type_name, factory->boolean_string())) {
   5313     __ LoadRoot(at, Heap::kTrueValueRootIndex);
   5314     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
   5315     __ LoadRoot(at, Heap::kFalseValueRootIndex);
   5316     *cmp1 = at;
   5317     *cmp2 = Operand(input);
   5318     final_branch_condition = eq;
   5319 
   5320   } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
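    // A value answers "function" iff its map is callable but not
    // undetectable, so mask both bits and require exactly kIsCallable.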
    __ JumpIfSmi(input, false_label);
    __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
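    // typeof null is "object", so null is accepted outright; any other
    // JSReceiver qualifies as long as it is neither callable nor
    // undetectable.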
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
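    // Lazy deopt points are patched with a call sequence of
    // Deoptimizer::patch_size() bytes, so two consecutive points must be at
    // least that far apart for the patches not to overlap.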
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (they need the return address), even though the implementation of LAZY
  // and EAGER is now identical. When LAZY is eventually completely folded
  // into EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

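  // The "al" (always) condition makes this deoptimization unconditional;
  // the zero_reg operands are dummies.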
  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
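  // All registers may hold live values here, so save them and record a
  // safepoint with registers for the runtime call below.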
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform the stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
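    // The stack grows downwards, so sp >= limit (hs is an unsigned
    // comparison) means there is still headroom.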
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform a stack overflow check before jumping if this goto needs one.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  __ CheckEnumCache(&call_runtime);

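  // Fast case: the receiver (and its prototype chain) has a usable enum
  // cache, so the object's map itself is returned as the result.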
  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
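  // An enum cache length of zero means there is nothing to enumerate; use
  // the canonical empty FixedArray instead of loading the cache.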
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ld(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ld(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
               Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
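  // Deoptimize if the object's current map differs from the expected one,
  // e.g. because the receiver was modified during for-in iteration.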
  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
               Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
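  // The runtime call below does not need a JavaScript context; clear cp.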
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

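  // The index is a Smi whose payload encodes (field_index << 1) | is_double.
  // If the low bit is set the field holds a mutable HeapNumber and must be
  // loaded via the deferred runtime path; after halving, a negative index
  // denotes an out-of-object property.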
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ dsra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index equals the negated out-of-object property index plus 1.
  __ Dsubu(scratch, result, scratch);
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8