      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include "src/v8.h"
     29 
     30 #include "src/mips/lithium-codegen-mips.h"
     31 #include "src/mips/lithium-gap-resolver-mips.h"
     32 #include "src/code-stubs.h"
     33 #include "src/stub-cache.h"
     34 #include "src/hydrogen-osr.h"
     35 
     36 namespace v8 {
     37 namespace internal {
     38 
     39 
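        // Passed as a CallWrapper to generated call sites: after each call it
        // records a safepoint for the associated pointer map, so the runtime
        // can find the live tagged values at that call.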
     40 class SafepointGenerator V8_FINAL : public CallWrapper {
     41  public:
     42   SafepointGenerator(LCodeGen* codegen,
     43                      LPointerMap* pointers,
     44                      Safepoint::DeoptMode mode)
     45       : codegen_(codegen),
     46         pointers_(pointers),
     47         deopt_mode_(mode) { }
     48   virtual ~SafepointGenerator() {}
     49 
     50   virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
     51 
     52   virtual void AfterCall() const V8_OVERRIDE {
     53     codegen_->RecordSafepoint(pointers_, deopt_mode_);
     54   }
     55 
     56  private:
     57   LCodeGen* codegen_;
     58   LPointerMap* pointers_;
     59   Safepoint::DeoptMode deopt_mode_;
     60 };
     61 
     62 
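        // Assembler shorthand used throughout this file: '__ foo(...)' expands
        // to masm()->foo(...), i.e. each '__' line emits one assembler call.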
     63 #define __ masm()->
     64 
     65 bool LCodeGen::GenerateCode() {
     66   LPhase phase("Z_Code generation", chunk());
     67   ASSERT(is_unused());
     68   status_ = GENERATING;
     69 
     70   // Open a frame scope to indicate that there is a frame on the stack.  The
     71   // NONE indicates that the scope shouldn't actually generate code to set up
     72   // the frame (that is done in GeneratePrologue).
     73   FrameScope frame_scope(masm_, StackFrame::NONE);
     74 
     75   return GeneratePrologue() &&
     76       GenerateBody() &&
     77       GenerateDeferredCode() &&
     78       GenerateDeoptJumpTable() &&
     79       GenerateSafepointTable();
     80 }
     81 
     82 
     83 void LCodeGen::FinishCode(Handle<Code> code) {
     84   ASSERT(is_done());
     85   code->set_stack_slots(GetStackSlotCount());
     86   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
     87   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
     88   PopulateDeoptimizationData(code);
     89 }
     90 
     91 
     92 void LCodeGen::SaveCallerDoubles() {
     93   ASSERT(info()->saves_caller_doubles());
     94   ASSERT(NeedsEagerFrame());
     95   Comment(";;; Save clobbered callee double registers");
     96   int count = 0;
     97   BitVector* doubles = chunk()->allocated_double_registers();
     98   BitVector::Iterator save_iterator(doubles);
     99   while (!save_iterator.Done()) {
    100     __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
    101             MemOperand(sp, count * kDoubleSize));
    102     save_iterator.Advance();
    103     count++;
    104   }
    105 }
    106 
    107 
    108 void LCodeGen::RestoreCallerDoubles() {
    109   ASSERT(info()->saves_caller_doubles());
    110   ASSERT(NeedsEagerFrame());
    111   Comment(";;; Restore clobbered callee double registers");
    112   BitVector* doubles = chunk()->allocated_double_registers();
    113   BitVector::Iterator save_iterator(doubles);
    114   int count = 0;
    115   while (!save_iterator.Done()) {
    116     __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
    117             MemOperand(sp, count * kDoubleSize));
    118     save_iterator.Advance();
    119     count++;
    120   }
    121 }
    122 
    123 
    124 bool LCodeGen::GeneratePrologue() {
    125   ASSERT(is_generating());
    126 
    127   if (info()->IsOptimizing()) {
    128     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
    129 
    130 #ifdef DEBUG
    131     if (strlen(FLAG_stop_at) > 0 &&
    132         info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
    133       __ stop("stop_at");
    134     }
    135 #endif
    136 
    137     // a1: Callee's JS function.
    138     // cp: Callee's context.
    139     // fp: Caller's frame pointer.
    140     // ra: Caller's pc.
    141 
    142     // Sloppy mode functions and builtins need to replace the receiver with the
    143     // global proxy when called as functions (without an explicit receiver
    144     // object).
    145     if (info_->this_has_uses() &&
    146         info_->strict_mode() == SLOPPY &&
    147         !info_->is_native()) {
    148       Label ok;
    149       int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
    150       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    151       __ lw(a2, MemOperand(sp, receiver_offset));
    152       __ Branch(&ok, ne, a2, Operand(at));
    153 
    154       __ lw(a2, GlobalObjectOperand());
    155       __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
    156 
    157       __ sw(a2, MemOperand(sp, receiver_offset));
    158 
    159       __ bind(&ok);
    160     }
    161   }
    162 
    163   info()->set_prologue_offset(masm_->pc_offset());
    164   if (NeedsEagerFrame()) {
    165     if (info()->IsStub()) {
    166       __ StubPrologue();
    167     } else {
    168       __ Prologue(info()->IsCodePreAgingActive());
    169     }
    170     frame_is_built_ = true;
    171     info_->AddNoFrameRange(0, masm_->pc_offset());
    172   }
    173 
    174   // Reserve space for the stack slots needed by the code.
    175   int slots = GetStackSlotCount();
    176   if (slots > 0) {
    177     if (FLAG_debug_code) {
    178       __ Subu(sp, sp, Operand(slots * kPointerSize));
    179       __ Push(a0, a1);
    180       __ Addu(a0, sp, Operand(slots * kPointerSize));
    181       __ li(a1, Operand(kSlotsZapValue));
    182       Label loop;
    183       __ bind(&loop);
    184       __ Subu(a0, a0, Operand(kPointerSize));
    185       __ sw(a1, MemOperand(a0, 2 * kPointerSize));
    186       __ Branch(&loop, ne, a0, Operand(sp));
    187       __ Pop(a0, a1);
    188     } else {
    189       __ Subu(sp, sp, Operand(slots * kPointerSize));
    190     }
    191   }
    192 
    193   if (info()->saves_caller_doubles()) {
    194     SaveCallerDoubles();
    195   }
    196 
    197   // Possibly allocate a local context.
    198   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    199   if (heap_slots > 0) {
    200     Comment(";;; Allocate local context");
    201     bool need_write_barrier = true;
    202     // Argument to NewContext is the function, which is in a1.
    203     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
    204       FastNewContextStub stub(isolate(), heap_slots);
    205       __ CallStub(&stub);
    206       // Result of FastNewContextStub is always in new space.
    207       need_write_barrier = false;
    208     } else {
    209       __ push(a1);
    210       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    211     }
    212     RecordSafepoint(Safepoint::kNoLazyDeopt);
    213     // Context is returned in both v0 and cp. It replaces the context
    214     // passed to us. It's saved in the stack and kept live in cp.
    215     __ mov(cp, v0);
    216     __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    217     // Copy any necessary parameters into the context.
    218     int num_parameters = scope()->num_parameters();
    219     for (int i = 0; i < num_parameters; i++) {
    220       Variable* var = scope()->parameter(i);
    221       if (var->IsContextSlot()) {
    222         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
    223             (num_parameters - 1 - i) * kPointerSize;
    224         // Load parameter from stack.
    225         __ lw(a0, MemOperand(fp, parameter_offset));
    226         // Store it in the context.
    227         MemOperand target = ContextOperand(cp, var->index());
    228         __ sw(a0, target);
    229         // Update the write barrier. This clobbers a3 and a0.
    230         if (need_write_barrier) {
    231           __ RecordWriteContextSlot(
    232               cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
    233         } else if (FLAG_debug_code) {
    234           Label done;
    235           __ JumpIfInNewSpace(cp, a0, &done);
    236           __ Abort(kExpectedNewSpaceObject);
    237           __ bind(&done);
    238         }
    239       }
    240     }
    241     Comment(";;; End allocate local context");
    242   }
    243 
    244   // Trace the call.
    245   if (FLAG_trace && info()->IsOptimizing()) {
    246     // We have not executed any compiled code yet, so cp still holds the
    247     // incoming context.
    248     __ CallRuntime(Runtime::kTraceEnter, 0);
    249   }
    250   return !is_aborted();
    251 }
    252 
    253 
    254 void LCodeGen::GenerateOsrPrologue() {
    255   // Generate the OSR entry prologue at the first unknown OSR value, or if there
    256   // are none, at the OSR entrypoint instruction.
    257   if (osr_pc_offset_ >= 0) return;
    258 
    259   osr_pc_offset_ = masm()->pc_offset();
    260 
    261   // Adjust the frame size, subsuming the unoptimized frame into the
    262   // optimized frame.
    263   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
    264   ASSERT(slots >= 0);
    265   __ Subu(sp, sp, Operand(slots * kPointerSize));
    266 }
    267 
    268 
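        // Runs before every instruction: reserve patchable space at call sites
        // so a lazy deoptimization call can be patched in later, and advance
        // the lazy-safepoint index for anything that is not a gap or a lazy
        // bailout.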
    269 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
    270   if (instr->IsCall()) {
    271     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    272   }
    273   if (!instr->IsLazyBailout() && !instr->IsGap()) {
    274     safepoints_.BumpLastLazySafepointIndex();
    275   }
    276 }
    277 
    278 
    279 bool LCodeGen::GenerateDeferredCode() {
    280   ASSERT(is_generating());
    281   if (deferred_.length() > 0) {
    282     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    283       LDeferredCode* code = deferred_[i];
    284 
    285       HValue* value =
    286           instructions_->at(code->instruction_index())->hydrogen_value();
    287       RecordAndWritePosition(
    288           chunk()->graph()->SourcePositionToScriptPosition(value->position()));
    289 
    290       Comment(";;; <@%d,#%d> "
    291               "-------------------- Deferred %s --------------------",
    292               code->instruction_index(),
    293               code->instr()->hydrogen_value()->id(),
    294               code->instr()->Mnemonic());
    295       __ bind(code->entry());
    296       if (NeedsDeferredFrame()) {
    297         Comment(";;; Build frame");
    298         ASSERT(!frame_is_built_);
    299         ASSERT(info()->IsStub());
    300         frame_is_built_ = true;
    301         __ MultiPush(cp.bit() | fp.bit() | ra.bit());
    302         __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
    303         __ push(scratch0());
    304         __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    305         Comment(";;; Deferred code");
    306       }
    307       code->Generate();
    308       if (NeedsDeferredFrame()) {
    309         Comment(";;; Destroy frame");
    310         ASSERT(frame_is_built_);
    311         __ pop(at);
    312         __ MultiPop(cp.bit() | fp.bit() | ra.bit());
    313         frame_is_built_ = false;
    314       }
    315       __ jmp(code->exit());
    316     }
    317   }
    318   // Deferred code is the last part of the instruction sequence. Mark
    319   // the generated code as done unless we bailed out.
    320   if (!is_aborted()) status_ = DONE;
    321   return !is_aborted();
    322 }
    323 
    324 
    325 bool LCodeGen::GenerateDeoptJumpTable() {
    326   if (deopt_jump_table_.length() > 0) {
    327     Comment(";;; -------------------- Jump table --------------------");
    328   }
    329   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    330   Label table_start;
    331   __ bind(&table_start);
    332   Label needs_frame;
    333   for (int i = 0; i < deopt_jump_table_.length(); i++) {
    334     __ bind(&deopt_jump_table_[i].label);
    335     Address entry = deopt_jump_table_[i].address;
    336     Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    337     int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    338     if (id == Deoptimizer::kNotDeoptimizationEntry) {
    339       Comment(";;; jump table entry %d.", i);
    340     } else {
    341       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    342     }
    343     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    344     if (deopt_jump_table_[i].needs_frame) {
    345       ASSERT(!info()->saves_caller_doubles());
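              // The frame-building sequence below is emitted only once; later
              // table entries that also need a frame just branch back to it.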
    346       if (needs_frame.is_bound()) {
    347         __ Branch(&needs_frame);
    348       } else {
    349         __ bind(&needs_frame);
    350         __ MultiPush(cp.bit() | fp.bit() | ra.bit());
    351         // This variant of deopt can only be used with stubs. Since we don't
    352         // have a function pointer to install in the stack frame that we're
    353         // building, install a special marker there instead.
    354         ASSERT(info()->IsStub());
    355         __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
    356         __ push(scratch0());
    357         __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    358         __ Call(t9);
    359       }
    360     } else {
    361       if (info()->saves_caller_doubles()) {
    362         ASSERT(info()->IsStub());
    363         RestoreCallerDoubles();
    364       }
    365       __ Call(t9);
    366     }
    367   }
    368   __ RecordComment("]");
    369 
    370   // The deoptimization jump table is the last part of the instruction
    371   // sequence. Mark the generated code as done unless we bailed out.
    372   if (!is_aborted()) status_ = DONE;
    373   return !is_aborted();
    374 }
    375 
    376 
    377 bool LCodeGen::GenerateSafepointTable() {
    378   ASSERT(is_done());
    379   safepoints_.Emit(masm(), GetStackSlotCount());
    380   return !is_aborted();
    381 }
    382 
    383 
    384 Register LCodeGen::ToRegister(int index) const {
    385   return Register::FromAllocationIndex(index);
    386 }
    387 
    388 
    389 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
    390   return DoubleRegister::FromAllocationIndex(index);
    391 }
    392 
    393 
    394 Register LCodeGen::ToRegister(LOperand* op) const {
    395   ASSERT(op->IsRegister());
    396   return ToRegister(op->index());
    397 }
    398 
    399 
    400 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
    401   if (op->IsRegister()) {
    402     return ToRegister(op->index());
    403   } else if (op->IsConstantOperand()) {
    404     LConstantOperand* const_op = LConstantOperand::cast(op);
    405     HConstant* constant = chunk_->LookupConstant(const_op);
    406     Handle<Object> literal = constant->handle(isolate());
    407     Representation r = chunk_->LookupLiteralRepresentation(const_op);
    408     if (r.IsInteger32()) {
    409       ASSERT(literal->IsNumber());
    410       __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    411     } else if (r.IsSmi()) {
    412       ASSERT(constant->HasSmiValue());
    413       __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    414     } else if (r.IsDouble()) {
    415       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    416     } else {
    417       ASSERT(r.IsSmiOrTagged());
    418       __ li(scratch, literal);
    419     }
    420     return scratch;
    421   } else if (op->IsStackSlot()) {
    422     __ lw(scratch, ToMemOperand(op));
    423     return scratch;
    424   }
    425   UNREACHABLE();
    426   return scratch;
    427 }
    428 
    429 
    430 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
    431   ASSERT(op->IsDoubleRegister());
    432   return ToDoubleRegister(op->index());
    433 }
    434 
    435 
    436 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
    437                                                 FloatRegister flt_scratch,
    438                                                 DoubleRegister dbl_scratch) {
    439   if (op->IsDoubleRegister()) {
    440     return ToDoubleRegister(op->index());
    441   } else if (op->IsConstantOperand()) {
    442     LConstantOperand* const_op = LConstantOperand::cast(op);
    443     HConstant* constant = chunk_->LookupConstant(const_op);
    444     Handle<Object> literal = constant->handle(isolate());
    445     Representation r = chunk_->LookupLiteralRepresentation(const_op);
    446     if (r.IsInteger32()) {
    447       ASSERT(literal->IsNumber());
    448       __ li(at, Operand(static_cast<int32_t>(literal->Number())));
    449       __ mtc1(at, flt_scratch);
    450       __ cvt_d_w(dbl_scratch, flt_scratch);
    451       return dbl_scratch;
    452     } else if (r.IsDouble()) {
    453       Abort(kUnsupportedDoubleImmediate);
    454     } else if (r.IsTagged()) {
    455       Abort(kUnsupportedTaggedImmediate);
    456     }
    457   } else if (op->IsStackSlot()) {
    458     MemOperand mem_op = ToMemOperand(op);
    459     __ ldc1(dbl_scratch, mem_op);
    460     return dbl_scratch;
    461   }
    462   UNREACHABLE();
    463   return dbl_scratch;
    464 }
    465 
    466 
    467 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
    468   HConstant* constant = chunk_->LookupConstant(op);
    469   ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
    470   return constant->handle(isolate());
    471 }
    472 
    473 
    474 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
    475   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
    476 }
    477 
    478 
    479 bool LCodeGen::IsSmi(LConstantOperand* op) const {
    480   return chunk_->LookupLiteralRepresentation(op).IsSmi();
    481 }
    482 
    483 
    484 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
    485   return ToRepresentation(op, Representation::Integer32());
    486 }
    487 
    488 
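        // For Smi representations the 32-bit payload is the Smi itself: on
        // 32-bit targets Smi::FromInt(value) is the value shifted left by the
        // tag size with a zero tag bit, returned here as a raw int32.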
    489 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
    490                                    const Representation& r) const {
    491   HConstant* constant = chunk_->LookupConstant(op);
    492   int32_t value = constant->Integer32Value();
    493   if (r.IsInteger32()) return value;
    494   ASSERT(r.IsSmiOrTagged());
    495   return reinterpret_cast<int32_t>(Smi::FromInt(value));
    496 }
    497 
    498 
    499 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
    500   HConstant* constant = chunk_->LookupConstant(op);
    501   return Smi::FromInt(constant->Integer32Value());
    502 }
    503 
    504 
    505 double LCodeGen::ToDouble(LConstantOperand* op) const {
    506   HConstant* constant = chunk_->LookupConstant(op);
    507   ASSERT(constant->HasDoubleValue());
    508   return constant->DoubleValue();
    509 }
    510 
    511 
    512 Operand LCodeGen::ToOperand(LOperand* op) {
    513   if (op->IsConstantOperand()) {
    514     LConstantOperand* const_op = LConstantOperand::cast(op);
    515     HConstant* constant = chunk()->LookupConstant(const_op);
    516     Representation r = chunk_->LookupLiteralRepresentation(const_op);
    517     if (r.IsSmi()) {
    518       ASSERT(constant->HasSmiValue());
    519       return Operand(Smi::FromInt(constant->Integer32Value()));
    520     } else if (r.IsInteger32()) {
    521       ASSERT(constant->HasInteger32Value());
    522       return Operand(constant->Integer32Value());
    523     } else if (r.IsDouble()) {
    524       Abort(kToOperandUnsupportedDoubleImmediate);
    525     }
    526     ASSERT(r.IsTagged());
    527     return Operand(constant->handle(isolate()));
    528   } else if (op->IsRegister()) {
    529     return Operand(ToRegister(op));
    530   } else if (op->IsDoubleRegister()) {
    531     Abort(kToOperandIsDoubleRegisterUnimplemented);
    532     return Operand(0);
    533   }
    534   // Stack slots not implemented, use ToMemOperand instead.
    535   UNREACHABLE();
    536   return Operand(0);
    537 }
    538 
    539 
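        // Maps an incoming-argument slot (indices are negative by convention)
        // to an sp-relative byte offset, for code that runs without building
        // an eager frame.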
    540 static int ArgumentsOffsetWithoutFrame(int index) {
    541   ASSERT(index < 0);
    542   return -(index + 1) * kPointerSize;
    543 }
    544 
    545 
    546 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
    547   ASSERT(!op->IsRegister());
    548   ASSERT(!op->IsDoubleRegister());
    549   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    550   if (NeedsEagerFrame()) {
    551     return MemOperand(fp, StackSlotOffset(op->index()));
    552   } else {
    553     // No eager frame was built, so address the parameter relative to
    554     // the stack pointer.
    555     return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
    556   }
    557 }
    558 
    559 
    560 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
    561   ASSERT(op->IsDoubleStackSlot());
    562   if (NeedsEagerFrame()) {
    563     return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
    564   } else {
    565     // No eager frame was built, so address the parameter relative to
    566     // the stack pointer.
    567     return MemOperand(
    568         sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
    569   }
    570 }
    571 
    572 
    573 void LCodeGen::WriteTranslation(LEnvironment* environment,
    574                                 Translation* translation) {
    575   if (environment == NULL) return;
    576 
    577   // The translation includes one command per value in the environment.
    578   int translation_size = environment->translation_size();
    579   // The output frame height does not include the parameters.
    580   int height = translation_size - environment->parameter_count();
    581 
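          // Translations are emitted outermost frame first: recurse on the
          // outer environment before writing this frame's own commands.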
    582   WriteTranslation(environment->outer(), translation);
    583   bool has_closure_id = !info()->closure().is_null() &&
    584       !info()->closure().is_identical_to(environment->closure());
    585   int closure_id = has_closure_id
    586       ? DefineDeoptimizationLiteral(environment->closure())
    587       : Translation::kSelfLiteralId;
    588 
    589   switch (environment->frame_type()) {
    590     case JS_FUNCTION:
    591       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
    592       break;
    593     case JS_CONSTRUCT:
    594       translation->BeginConstructStubFrame(closure_id, translation_size);
    595       break;
    596     case JS_GETTER:
    597       ASSERT(translation_size == 1);
    598       ASSERT(height == 0);
    599       translation->BeginGetterStubFrame(closure_id);
    600       break;
    601     case JS_SETTER:
    602       ASSERT(translation_size == 2);
    603       ASSERT(height == 0);
    604       translation->BeginSetterStubFrame(closure_id);
    605       break;
    606     case STUB:
    607       translation->BeginCompiledStubFrame();
    608       break;
    609     case ARGUMENTS_ADAPTOR:
    610       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
    611       break;
    612   }
    613 
    614   int object_index = 0;
    615   int dematerialized_index = 0;
    616   for (int i = 0; i < translation_size; ++i) {
    617     LOperand* value = environment->values()->at(i);
    618     AddToTranslation(environment,
    619                      translation,
    620                      value,
    621                      environment->HasTaggedValueAt(i),
    622                      environment->HasUint32ValueAt(i),
    623                      &object_index,
    624                      &dematerialized_index);
    625   }
    626 }
    627 
    628 
    629 void LCodeGen::AddToTranslation(LEnvironment* environment,
    630                                 Translation* translation,
    631                                 LOperand* op,
    632                                 bool is_tagged,
    633                                 bool is_uint32,
    634                                 int* object_index_pointer,
    635                                 int* dematerialized_index_pointer) {
    636   if (op == LEnvironment::materialization_marker()) {
    637     int object_index = (*object_index_pointer)++;
    638     if (environment->ObjectIsDuplicateAt(object_index)) {
    639       int dupe_of = environment->ObjectDuplicateOfAt(object_index);
    640       translation->DuplicateObject(dupe_of);
    641       return;
    642     }
    643     int object_length = environment->ObjectLengthAt(object_index);
    644     if (environment->ObjectIsArgumentsAt(object_index)) {
    645       translation->BeginArgumentsObject(object_length);
    646     } else {
    647       translation->BeginCapturedObject(object_length);
    648     }
    649     int dematerialized_index = *dematerialized_index_pointer;
    650     int env_offset = environment->translation_size() + dematerialized_index;
    651     *dematerialized_index_pointer += object_length;
    652     for (int i = 0; i < object_length; ++i) {
    653       LOperand* value = environment->values()->at(env_offset + i);
    654       AddToTranslation(environment,
    655                        translation,
    656                        value,
    657                        environment->HasTaggedValueAt(env_offset + i),
    658                        environment->HasUint32ValueAt(env_offset + i),
    659                        object_index_pointer,
    660                        dematerialized_index_pointer);
    661     }
    662     return;
    663   }
    664 
    665   if (op->IsStackSlot()) {
    666     if (is_tagged) {
    667       translation->StoreStackSlot(op->index());
    668     } else if (is_uint32) {
    669       translation->StoreUint32StackSlot(op->index());
    670     } else {
    671       translation->StoreInt32StackSlot(op->index());
    672     }
    673   } else if (op->IsDoubleStackSlot()) {
    674     translation->StoreDoubleStackSlot(op->index());
    675   } else if (op->IsRegister()) {
    676     Register reg = ToRegister(op);
    677     if (is_tagged) {
    678       translation->StoreRegister(reg);
    679     } else if (is_uint32) {
    680       translation->StoreUint32Register(reg);
    681     } else {
    682       translation->StoreInt32Register(reg);
    683     }
    684   } else if (op->IsDoubleRegister()) {
    685     DoubleRegister reg = ToDoubleRegister(op);
    686     translation->StoreDoubleRegister(reg);
    687   } else if (op->IsConstantOperand()) {
    688     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    689     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    690     translation->StoreLiteral(src_index);
    691   } else {
    692     UNREACHABLE();
    693   }
    694 }
    695 
    696 
    697 void LCodeGen::CallCode(Handle<Code> code,
    698                         RelocInfo::Mode mode,
    699                         LInstruction* instr) {
    700   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
    701 }
    702 
    703 
    704 void LCodeGen::CallCodeGeneric(Handle<Code> code,
    705                                RelocInfo::Mode mode,
    706                                LInstruction* instr,
    707                                SafepointMode safepoint_mode) {
    708   ASSERT(instr != NULL);
    709   __ Call(code, mode);
    710   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
    711 }
    712 
    713 
    714 void LCodeGen::CallRuntime(const Runtime::Function* function,
    715                            int num_arguments,
    716                            LInstruction* instr,
    717                            SaveFPRegsMode save_doubles) {
    718   ASSERT(instr != NULL);
    719 
    720   __ CallRuntime(function, num_arguments, save_doubles);
    721 
    722   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    723 }
    724 
    725 
    726 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
    727   if (context->IsRegister()) {
    728     __ Move(cp, ToRegister(context));
    729   } else if (context->IsStackSlot()) {
    730     __ lw(cp, ToMemOperand(context));
    731   } else if (context->IsConstantOperand()) {
    732     HConstant* constant =
    733         chunk_->LookupConstant(LConstantOperand::cast(context));
    734     __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
    735   } else {
    736     UNREACHABLE();
    737   }
    738 }
    739 
    740 
    741 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
    742                                        int argc,
    743                                        LInstruction* instr,
    744                                        LOperand* context) {
    745   LoadContextFromDeferred(context);
    746   __ CallRuntimeSaveDoubles(id);
    747   RecordSafepointWithRegisters(
    748       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
    749 }
    750 
    751 
    752 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
    753                                                     Safepoint::DeoptMode mode) {
    754   environment->set_has_been_used();
    755   if (!environment->HasBeenRegistered()) {
    756     // Physical stack frame layout:
    757     // -x ............. -4  0 ..................................... y
    758     // [incoming arguments] [spill slots] [pushed outgoing arguments]
    759 
    760     // Layout of the environment:
    761     // 0 ..................................................... size-1
    762     // [parameters] [locals] [expression stack including arguments]
    763 
    764     // Layout of the translation:
    765     // 0 ........................................................ size - 1 + 4
    766     // [expression stack including arguments] [locals] [4 words] [parameters]
    767     // |>------------  translation_size ------------<|
    768 
    769     int frame_count = 0;
    770     int jsframe_count = 0;
    771     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
    772       ++frame_count;
    773       if (e->frame_type() == JS_FUNCTION) {
    774         ++jsframe_count;
    775       }
    776     }
    777     Translation translation(&translations_, frame_count, jsframe_count, zone());
    778     WriteTranslation(environment, &translation);
    779     int deoptimization_index = deoptimizations_.length();
    780     int pc_offset = masm()->pc_offset();
    781     environment->Register(deoptimization_index,
    782                           translation.index(),
    783                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    784     deoptimizations_.Add(environment, zone());
    785   }
    786 }
    787 
    788 
    789 void LCodeGen::DeoptimizeIf(Condition condition,
    790                             LEnvironment* environment,
    791                             Deoptimizer::BailoutType bailout_type,
    792                             Register src1,
    793                             const Operand& src2) {
    794   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
    795   ASSERT(environment->HasBeenRegistered());
    796   int id = environment->deoptimization_index();
    797   ASSERT(info()->IsOptimizing() || info()->IsStub());
    798   Address entry =
    799       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
    800   if (entry == NULL) {
    801     Abort(kBailoutWasNotPrepared);
    802     return;
    803   }
    804 
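          // Stress mode (--deopt-every-n-times): keep a countdown in an
          // external counter cell, decrement it at every potential deopt, and
          // force a bailout (then reset the counter) whenever it hits zero.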
    805   if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    806     Register scratch = scratch0();
    807     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    808     Label no_deopt;
    809     __ Push(a1, scratch);
    810     __ li(scratch, Operand(count));
    811     __ lw(a1, MemOperand(scratch));
    812     __ Subu(a1, a1, Operand(1));
    813     __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    814     __ li(a1, Operand(FLAG_deopt_every_n_times));
    815     __ sw(a1, MemOperand(scratch));
    816     __ Pop(a1, scratch);
    817 
    818     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    819     __ bind(&no_deopt);
    820     __ sw(a1, MemOperand(scratch));
    821     __ Pop(a1, scratch);
    822   }
    823 
    824   if (info()->ShouldTrapOnDeopt()) {
    825     Label skip;
    826     if (condition != al) {
    827       __ Branch(&skip, NegateCondition(condition), src1, src2);
    828     }
    829     __ stop("trap_on_deopt");
    830     __ bind(&skip);
    831   }
    832 
    833   ASSERT(info()->IsStub() || frame_is_built_);
    834   // Go through the jump table if we need to handle a condition, build a
    835   // frame, or restore caller doubles.
    836   if (condition == al && frame_is_built_ &&
    837       !info()->saves_caller_doubles()) {
    838     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
    839   } else {
    840     // We often have several deopts to the same entry, reuse the last
    841     // jump entry if this is the case.
    842     if (deopt_jump_table_.is_empty() ||
    843         (deopt_jump_table_.last().address != entry) ||
    844         (deopt_jump_table_.last().bailout_type != bailout_type) ||
    845         (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
    846       Deoptimizer::JumpTableEntry table_entry(entry,
    847                                               bailout_type,
    848                                               !frame_is_built_);
    849       deopt_jump_table_.Add(table_entry, zone());
    850     }
    851     __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
    852   }
    853 }
    854 
    855 
    856 void LCodeGen::DeoptimizeIf(Condition condition,
    857                             LEnvironment* environment,
    858                             Register src1,
    859                             const Operand& src2) {
    860   Deoptimizer::BailoutType bailout_type = info()->IsStub()
    861       ? Deoptimizer::LAZY
    862       : Deoptimizer::EAGER;
    863   DeoptimizeIf(condition, environment, bailout_type, src1, src2);
    864 }
    865 
    866 
    867 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
    868   int length = deoptimizations_.length();
    869   if (length == 0) return;
    870   Handle<DeoptimizationInputData> data =
    871       DeoptimizationInputData::New(isolate(), length, TENURED);
    872 
    873   Handle<ByteArray> translations =
    874       translations_.CreateByteArray(isolate()->factory());
    875   data->SetTranslationByteArray(*translations);
    876   data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
    877   data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
    878   if (info_->IsOptimizing()) {
    879     // Reference to shared function info does not change between phases.
    880     AllowDeferredHandleDereference allow_handle_dereference;
    881     data->SetSharedFunctionInfo(*info_->shared_info());
    882   } else {
    883     data->SetSharedFunctionInfo(Smi::FromInt(0));
    884   }
    885 
    886   Handle<FixedArray> literals =
    887       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
    888   { AllowDeferredHandleDereference copy_handles;
    889     for (int i = 0; i < deoptimization_literals_.length(); i++) {
    890       literals->set(i, *deoptimization_literals_[i]);
    891     }
    892     data->SetLiteralArray(*literals);
    893   }
    894 
    895   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
    896   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
    897 
    898   // Populate the deoptimization entries.
    899   for (int i = 0; i < length; i++) {
    900     LEnvironment* env = deoptimizations_[i];
    901     data->SetAstId(i, env->ast_id());
    902     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    903     data->SetArgumentsStackHeight(i,
    904                                   Smi::FromInt(env->arguments_stack_height()));
    905     data->SetPc(i, Smi::FromInt(env->pc_offset()));
    906   }
    907   code->set_deoptimization_data(*data);
    908 }
    909 
    910 
    911 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
    912   int result = deoptimization_literals_.length();
    913   for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    914     if (deoptimization_literals_[i].is_identical_to(literal)) return i;
    915   }
    916   deoptimization_literals_.Add(literal, zone());
    917   return result;
    918 }
    919 
    920 
    921 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
    922   ASSERT(deoptimization_literals_.length() == 0);
    923 
    924   const ZoneList<Handle<JSFunction> >* inlined_closures =
    925       chunk()->inlined_closures();
    926 
    927   for (int i = 0, length = inlined_closures->length();
    928        i < length;
    929        i++) {
    930     DefineDeoptimizationLiteral(inlined_closures->at(i));
    931   }
    932 
    933   inlined_function_count_ = deoptimization_literals_.length();
    934 }
    935 
    936 
    937 void LCodeGen::RecordSafepointWithLazyDeopt(
    938     LInstruction* instr, SafepointMode safepoint_mode) {
    939   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    940     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    941   } else {
    942     ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    943     RecordSafepointWithRegisters(
    944         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
    945   }
    946 }
    947 
    948 
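        // Emits one safepoint table entry: each tagged stack slot in the
        // pointer map gets a bit in the table, and pointer registers are
        // recorded only for safepoint kinds that include registers.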
    949 void LCodeGen::RecordSafepoint(
    950     LPointerMap* pointers,
    951     Safepoint::Kind kind,
    952     int arguments,
    953     Safepoint::DeoptMode deopt_mode) {
    954   ASSERT(expected_safepoint_kind_ == kind);
    955 
    956   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
    957   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
    958       kind, arguments, deopt_mode);
    959   for (int i = 0; i < operands->length(); i++) {
    960     LOperand* pointer = operands->at(i);
    961     if (pointer->IsStackSlot()) {
    962       safepoint.DefinePointerSlot(pointer->index(), zone());
    963     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
    964       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    965     }
    966   }
    967 }
    968 
    969 
    970 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
    971                                Safepoint::DeoptMode deopt_mode) {
    972   RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
    973 }
    974 
    975 
    976 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
    977   LPointerMap empty_pointers(zone());
    978   RecordSafepoint(&empty_pointers, deopt_mode);
    979 }
    980 
    981 
    982 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
    983                                             int arguments,
    984                                             Safepoint::DeoptMode deopt_mode) {
    985   RecordSafepoint(
    986       pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
    987 }
    988 
    989 
    990 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    991     LPointerMap* pointers,
    992     int arguments,
    993     Safepoint::DeoptMode deopt_mode) {
    994   RecordSafepoint(
    995       pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
    996 }
    997 
    998 
    999 void LCodeGen::RecordAndWritePosition(int position) {
   1000   if (position == RelocInfo::kNoPosition) return;
   1001   masm()->positions_recorder()->RecordPosition(position);
   1002   masm()->positions_recorder()->WriteRecordedPositions();
   1003 }
   1004 
   1005 
   1006 static const char* LabelType(LLabel* label) {
   1007   if (label->is_loop_header()) return " (loop header)";
   1008   if (label->is_osr_entry()) return " (OSR entry)";
   1009   return "";
   1010 }
   1011 
   1012 
   1013 void LCodeGen::DoLabel(LLabel* label) {
   1014   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
   1015           current_instruction_,
   1016           label->hydrogen_value()->id(),
   1017           label->block_id(),
   1018           LabelType(label));
   1019   __ bind(label->label());
   1020   current_block_ = label->block_id();
   1021   DoGap(label);
   1022 }
   1023 
   1024 
   1025 void LCodeGen::DoParallelMove(LParallelMove* move) {
   1026   resolver_.Resolve(move);
   1027 }
   1028 
   1029 
   1030 void LCodeGen::DoGap(LGap* gap) {
   1031   for (int i = LGap::FIRST_INNER_POSITION;
   1032        i <= LGap::LAST_INNER_POSITION;
   1033        i++) {
   1034     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
   1035     LParallelMove* move = gap->GetParallelMove(inner_pos);
   1036     if (move != NULL) DoParallelMove(move);
   1037   }
   1038 }
   1039 
   1040 
   1041 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
   1042   DoGap(instr);
   1043 }
   1044 
   1045 
   1046 void LCodeGen::DoParameter(LParameter* instr) {
   1047   // Nothing to do.
   1048 }
   1049 
   1050 
   1051 void LCodeGen::DoCallStub(LCallStub* instr) {
   1052   ASSERT(ToRegister(instr->context()).is(cp));
   1053   ASSERT(ToRegister(instr->result()).is(v0));
   1054   switch (instr->hydrogen()->major_key()) {
   1055     case CodeStub::RegExpExec: {
   1056       RegExpExecStub stub(isolate());
   1057       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1058       break;
   1059     }
   1060     case CodeStub::SubString: {
   1061       SubStringStub stub(isolate());
   1062       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1063       break;
   1064     }
   1065     case CodeStub::StringCompare: {
   1066       StringCompareStub stub(isolate());
   1067       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1068       break;
   1069     }
   1070     default:
   1071       UNREACHABLE();
   1072   }
   1073 }
   1074 
   1075 
   1076 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   1077   GenerateOsrPrologue();
   1078 }
   1079 
   1080 
   1081 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   1082   Register dividend = ToRegister(instr->dividend());
   1083   int32_t divisor = instr->divisor();
   1084   ASSERT(dividend.is(ToRegister(instr->result())));
   1085 
   1086   // Theoretically, a variation of the branch-free code for integer division by
   1087   // a power of 2 (calculating the remainder via an additional multiplication
   1088   // (which gets simplified to an 'and') and subtraction) should be faster, and
   1089   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
   1090   // indicate that positive dividends are heavily favored, so the branching
   1091   // version performs better.
   1092   HMod* hmod = instr->hydrogen();
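          // |divisor| is a power of two, so |divisor| - 1 is the mask that
          // extracts the remainder of the magnitude. The expression below
          // computes it without overflowing even for divisor == kMinInt.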
   1093   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   1094   Label dividend_is_not_negative, done;
   1095 
   1096   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
   1097     __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
   1098     // Note: The code below even works when right contains kMinInt.
   1099     __ subu(dividend, zero_reg, dividend);
   1100     __ And(dividend, dividend, Operand(mask));
   1101     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1102       DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
   1103     }
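            // USE_DELAY_SLOT: the subu below sits in the branch delay slot and
            // executes on the way to 'done', re-negating the remainder.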
   1104     __ Branch(USE_DELAY_SLOT, &done);
   1105     __ subu(dividend, zero_reg, dividend);
   1106   }
   1107 
   1108   __ bind(&dividend_is_not_negative);
   1109   __ And(dividend, dividend, Operand(mask));
   1110   __ bind(&done);
   1111 }
   1112 
   1113 
   1114 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   1115   Register dividend = ToRegister(instr->dividend());
   1116   int32_t divisor = instr->divisor();
   1117   Register result = ToRegister(instr->result());
   1118   ASSERT(!dividend.is(result));
   1119 
   1120   if (divisor == 0) {
   1121     DeoptimizeIf(al, instr->environment());
   1122     return;
   1123   }
   1124 
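          // n % d == n - (n / d) * d. TruncatingDiv computes the quotient of
          // dividend by Abs(divisor) rounded toward zero, using a multiply by
          // a precomputed magic constant instead of a div instruction.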
   1125   __ TruncatingDiv(result, dividend, Abs(divisor));
   1126   __ Mul(result, result, Operand(Abs(divisor)));
   1127   __ Subu(result, dividend, Operand(result));
   1128 
   1129   // Check for negative zero.
   1130   HMod* hmod = instr->hydrogen();
   1131   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1132     Label remainder_not_zero;
   1133     __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
   1134     DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
   1135     __ bind(&remainder_not_zero);
   1136   }
   1137 }
   1138 
   1139 
   1140 void LCodeGen::DoModI(LModI* instr) {
   1141   HMod* hmod = instr->hydrogen();
   1142   const Register left_reg = ToRegister(instr->left());
   1143   const Register right_reg = ToRegister(instr->right());
   1144   const Register result_reg = ToRegister(instr->result());
   1145 
   1146   // div runs in the background while we check for special cases.
   1147   __ div(left_reg, right_reg);
   1148 
   1149   Label done;
   1150   // Check for x % 0, we have to deopt in this case because we can't return a
   1151   // NaN.
   1152   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
   1153     DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
   1154   }
   1155 
   1156   // Check for kMinInt % -1, div will return kMinInt, which is not what we
   1157   // want. We have to deopt if we care about -0, because we can't return that.
   1158   if (hmod->CheckFlag(HValue::kCanOverflow)) {
   1159     Label no_overflow_possible;
   1160     __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
   1161     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1162       DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
   1163     } else {
   1164       __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
   1165       __ Branch(USE_DELAY_SLOT, &done);
   1166       __ mov(result_reg, zero_reg);
   1167     }
   1168     __ bind(&no_overflow_possible);
   1169   }
   1170 
   1171   // If we care about -0, test if the dividend is <0 and the result is 0.
   1172   __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
   1173   __ mfhi(result_reg);
   1174   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1175     DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
   1176   }
   1177   __ bind(&done);
   1178 }
   1179 
   1180 
   1181 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   1182   Register dividend = ToRegister(instr->dividend());
   1183   int32_t divisor = instr->divisor();
   1184   Register result = ToRegister(instr->result());
   1185   ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
   1186   ASSERT(!result.is(dividend));
   1187 
   1188   // Check for (0 / -x) that will produce negative zero.
   1189   HDiv* hdiv = instr->hydrogen();
   1190   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1191     DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
   1192   }
   1193   // Check for (kMinInt / -1).
   1194   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
   1195     DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
   1196   }
   1197   // Deoptimize if remainder will not be 0.
   1198   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
   1199       divisor != 1 && divisor != -1) {
   1200     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   1201     __ And(at, dividend, Operand(mask));
   1202     DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
   1203   }
   1204 
   1205   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
   1206     __ Subu(result, zero_reg, dividend);
   1207     return;
   1208   }
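          // An arithmetic shift of a negative dividend rounds toward minus
          // infinity, so add |divisor| - 1 (built from the sign bits below)
          // before shifting to get the required round-toward-zero behavior.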
   1209   uint16_t shift = WhichPowerOf2Abs(divisor);
   1210   if (shift == 0) {
   1211     __ Move(result, dividend);
   1212   } else if (shift == 1) {
   1213     __ srl(result, dividend, 31);
   1214     __ Addu(result, dividend, Operand(result));
   1215   } else {
   1216     __ sra(result, dividend, 31);
   1217     __ srl(result, result, 32 - shift);
   1218     __ Addu(result, dividend, Operand(result));
   1219   }
   1220   if (shift > 0) __ sra(result, result, shift);
   1221   if (divisor < 0) __ Subu(result, zero_reg, result);
   1222 }
   1223 
   1224 
   1225 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   1226   Register dividend = ToRegister(instr->dividend());
   1227   int32_t divisor = instr->divisor();
   1228   Register result = ToRegister(instr->result());
   1229   ASSERT(!dividend.is(result));
   1230 
   1231   if (divisor == 0) {
   1232     DeoptimizeIf(al, instr->environment());
   1233     return;
   1234   }
   1235 
   1236   // Check for (0 / -x) that will produce negative zero.
   1237   HDiv* hdiv = instr->hydrogen();
   1238   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1239     DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
   1240   }
   1241 
   1242   __ TruncatingDiv(result, dividend, Abs(divisor));
   1243   if (divisor < 0) __ Subu(result, zero_reg, result);
   1244 
   1245   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
   1246     __ Mul(scratch0(), result, Operand(divisor));
   1247     __ Subu(scratch0(), scratch0(), dividend);
   1248     DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
   1249   }
   1250 }
   1251 
   1252 
   1253 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
   1254 void LCodeGen::DoDivI(LDivI* instr) {
   1255   HBinaryOperation* hdiv = instr->hydrogen();
   1256   Register dividend = ToRegister(instr->dividend());
   1257   Register divisor = ToRegister(instr->divisor());
   1258   const Register result = ToRegister(instr->result());
   1259 
   1260   // On MIPS div is asynchronous - it will run in the background while we
   1261   // check for special cases.
   1262   __ div(dividend, divisor);
   1263 
   1264   // Check for x / 0.
   1265   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1266     DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
   1267   }
   1268 
   1269   // Check for (0 / -x) that will produce negative zero.
   1270   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1271     Label left_not_zero;
   1272     __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
   1273     DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
   1274     __ bind(&left_not_zero);
   1275   }
   1276 
   1277   // Check for (kMinInt / -1).
   1278   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
   1279       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   1280     Label left_not_min_int;
   1281     __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
   1282     DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
   1283     __ bind(&left_not_min_int);
   1284   }
   1285 
   1286   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   1287     __ mfhi(result);
   1288     DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
   1289     __ mflo(result);
   1290   } else {
   1291     __ mflo(result);
   1292   }
   1293 }
   1294 
   1295 
   1296 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
   1297   DoubleRegister addend = ToDoubleRegister(instr->addend());
   1298   DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
   1299   DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
   1300 
   1301   // This is computed in-place.
   1302   ASSERT(addend.is(ToDoubleRegister(instr->result())));
   1303 
   1304   __ madd_d(addend, addend, multiplier, multiplicand);
   1305 }
   1306 
   1307 
   1308 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   1309   Register dividend = ToRegister(instr->dividend());
   1310   Register result = ToRegister(instr->result());
   1311   int32_t divisor = instr->divisor();
   1312   Register scratch = result.is(dividend) ? scratch0() : dividend;
   1313   ASSERT(!result.is(dividend) || !scratch.is(dividend));
   1314 
   1315   // If the divisor is 1, return the dividend.
   1316   if (divisor == 1) {
   1317     __ Move(result, dividend);
   1318     return;
   1319   }
   1320 
   1321   // If the divisor is positive, things are easy: There can be no deopts and we
   1322   // can simply do an arithmetic right shift.
   1323   uint16_t shift = WhichPowerOf2Abs(divisor);
   1324   if (divisor > 1) {
   1325     __ sra(result, dividend, shift);
   1326     return;
   1327   }
   1328 
   1329   // If the divisor is negative, we have to negate and handle edge cases.
   1330 
   1331   // dividend can be the same register as result so save the value of it
   1332   // for checking overflow.
   1333   __ Move(scratch, dividend);
   1334 
   1335   __ Subu(result, zero_reg, dividend);
   1336   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1337     DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
   1338   }
   1339 
   1340   // Dividing by -1 is basically negation, unless we overflow.
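          // scratch still holds the original dividend. dividend XOR -dividend
          // has a clear sign bit only if both values share a sign, i.e. only
          // if the negation overflowed (kMinInt) or the dividend was zero;
          // the checks below test exactly that.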
   1341   __ Xor(scratch, scratch, result);
   1342   if (divisor == -1) {
   1343     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1344       DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
   1345     }
   1346     return;
   1347   }
   1348 
   1349   // If the negation could not overflow, simply shifting is OK.
   1350   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1351     __ sra(result, result, shift);
   1352     return;
   1353   }
   1354 
   1355   Label no_overflow, done;
   1356   __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
   1357   __ li(result, Operand(kMinInt / divisor));
   1358   __ Branch(&done);
   1359   __ bind(&no_overflow);
   1360   __ sra(result, result, shift);
   1361   __ bind(&done);
   1362 }
   1363 
   1364 
   1365 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   1366   Register dividend = ToRegister(instr->dividend());
   1367   int32_t divisor = instr->divisor();
   1368   Register result = ToRegister(instr->result());
   1369   ASSERT(!dividend.is(result));
   1370 
   1371   if (divisor == 0) {
   1372     DeoptimizeIf(al, instr->environment());
   1373     return;
   1374   }
   1375 
   1376   // Check for (0 / -x) that will produce negative zero.
   1377   HMathFloorOfDiv* hdiv = instr->hydrogen();
   1378   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1379     DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
   1380   }
   1381 
   1382   // Easy case: We need no dynamic check for the dividend and the flooring
   1383   // division is the same as the truncating division.
   1384   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
   1385       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
   1386     __ TruncatingDiv(result, dividend, Abs(divisor));
   1387     if (divisor < 0) __ Subu(result, zero_reg, result);
   1388     return;
   1389   }
   1390 
   1391   // In the general case we may need to adjust before and after the truncating
   1392   // division to get a flooring division.
   1393   Register temp = ToRegister(instr->temp());
   1394   ASSERT(!temp.is(dividend) && !temp.is(result));
   1395   Label needs_adjustment, done;
   1396   __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
   1397             dividend, Operand(zero_reg));
   1398   __ TruncatingDiv(result, dividend, Abs(divisor));
   1399   if (divisor < 0) __ Subu(result, zero_reg, result);
   1400   __ jmp(&done);
   1401   __ bind(&needs_adjustment);
   1402   __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
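          // For operands of opposite signs, floor(a / b) == trunc((a +/- 1) / b) - 1,
          // so bias the dividend toward zero before the truncating division.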
   1403   __ TruncatingDiv(result, temp, Abs(divisor));
   1404   if (divisor < 0) __ Subu(result, zero_reg, result);
   1405   __ Subu(result, result, Operand(1));
   1406   __ bind(&done);
   1407 }
   1408 
   1409 
   1410 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
   1411 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   1412   HBinaryOperation* hdiv = instr->hydrogen();
   1413   Register dividend = ToRegister(instr->dividend());
   1414   Register divisor = ToRegister(instr->divisor());
   1415   const Register result = ToRegister(instr->result());
   1416 
   1417   // On MIPS, div is asynchronous: it runs in the background while we
   1418   // check for special cases.
   1419   __ div(dividend, divisor);
   1420 
   1421   // Check for x / 0.
   1422   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1423     DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
   1424   }
   1425 
   1426   // Check for (0 / -x) that will produce negative zero.
   1427   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1428     Label left_not_zero;
   1429     __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
   1430     DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
   1431     __ bind(&left_not_zero);
   1432   }
   1433 
   1434   // Check for (kMinInt / -1).
   1435   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
   1436       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   1437     Label left_not_min_int;
   1438     __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
   1439     DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
   1440     __ bind(&left_not_min_int);
   1441   }
   1442 
   1443   // We performed a truncating division. Correct the result if necessary.
   1444   Label done;
   1445   Register remainder = scratch0();
   1446   __ mfhi(remainder);
   1447   __ mflo(result);
   1448   __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
   1449   __ Xor(remainder, remainder, Operand(divisor));
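          // The truncated quotient is one too large for flooring when the
          // remainder is nonzero and its sign differs from the divisor's.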
   1450   __ Branch(&done, ge, remainder, Operand(zero_reg));
   1451   __ Subu(result, result, Operand(1));
   1452   __ bind(&done);
   1453 }
   1454 
   1455 
   1456 void LCodeGen::DoMulI(LMulI* instr) {
   1457   Register scratch = scratch0();
   1458   Register result = ToRegister(instr->result());
   1459   // Note that result may alias left.
   1460   Register left = ToRegister(instr->left());
   1461   LOperand* right_op = instr->right();
   1462 
   1463   bool bailout_on_minus_zero =
   1464     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   1465   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1466 
   1467   if (right_op->IsConstantOperand()) {
   1468     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
   1469 
   1470     if (bailout_on_minus_zero && (constant < 0)) {
   1471       // The case of a zero constant is handled separately.
   1472       // If the constant is negative and left is zero, the result should be -0.
   1473       DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
   1474     }
   1475 
   1476     switch (constant) {
   1477       case -1:
   1478         if (overflow) {
   1479           __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
   1480           DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
   1481         } else {
   1482           __ Subu(result, zero_reg, left);
   1483         }
   1484         break;
   1485       case 0:
   1486         if (bailout_on_minus_zero) {
   1487           // If left is strictly negative and the constant is zero, the
   1488           // result is -0. Deoptimize if required, otherwise return 0.
   1489           DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
   1490         }
   1491         __ mov(result, zero_reg);
   1492         break;
   1493       case 1:
   1494         // Nothing to do.
   1495         __ Move(result, left);
   1496         break;
   1497       default:
   1498         // Multiplying by powers of two and powers of two plus or minus
   1499         // one can be done faster with shifted operands.
   1500         // For other constants we emit standard code.
   1501         int32_t mask = constant >> 31;
   1502         uint32_t constant_abs = (constant + mask) ^ mask;
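                // Branch-free absolute value: mask is 0 for a non-negative
                // constant and -1 otherwise, so (constant + mask) ^ mask == |constant|.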
   1503 
   1504         if (IsPowerOf2(constant_abs)) {
   1505           int32_t shift = WhichPowerOf2(constant_abs);
   1506           __ sll(result, left, shift);
   1507           // Correct the sign of the result if the constant is negative.
   1508           if (constant < 0)  __ Subu(result, zero_reg, result);
   1509         } else if (IsPowerOf2(constant_abs - 1)) {
   1510           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1511           __ sll(scratch, left, shift);
   1512           __ Addu(result, scratch, left);
   1513           // Correct the sign of the result if the constant is negative.
   1514           if (constant < 0)  __ Subu(result, zero_reg, result);
   1515         } else if (IsPowerOf2(constant_abs + 1)) {
   1516           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1517           __ sll(scratch, left, shift);
   1518           __ Subu(result, scratch, left);
   1519           // Correct the sign of the result if the constant is negative.
   1520           if (constant < 0)  __ Subu(result, zero_reg, result);
   1521         } else {
   1522           // Generate standard code.
   1523           __ li(at, constant);
   1524           __ Mul(result, left, at);
   1525         }
   1526     }
   1527 
   1528   } else {
   1529     ASSERT(right_op->IsRegister());
   1530     Register right = ToRegister(right_op);
   1531 
   1532     if (overflow) {
   1533       // hi:lo = left * right.
   1534       if (instr->hydrogen()->representation().IsSmi()) {
   1535         __ SmiUntag(result, left);
   1536         __ mult(result, right);
   1537         __ mfhi(scratch);
   1538         __ mflo(result);
   1539       } else {
   1540         __ mult(left, right);
   1541         __ mfhi(scratch);
   1542         __ mflo(result);
   1543       }
   1544       __ sra(at, result, 31);
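              // The 64-bit product fits in 32 bits iff the high word equals the
              // sign extension of the low word.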
   1545       DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
   1546     } else {
   1547       if (instr->hydrogen()->representation().IsSmi()) {
   1548         __ SmiUntag(result, left);
   1549         __ Mul(result, result, right);
   1550       } else {
   1551         __ Mul(result, left, right);
   1552       }
   1553     }
   1554 
   1555     if (bailout_on_minus_zero) {
   1556       Label done;
   1557       __ Xor(at, left, right);
   1558       __ Branch(&done, ge, at, Operand(zero_reg));
   1559       // Bail out if the result is minus zero.
   1560       DeoptimizeIf(eq,
   1561                    instr->environment(),
   1562                    result,
   1563                    Operand(zero_reg));
   1564       __ bind(&done);
   1565     }
   1566   }
   1567 }
   1568 
   1569 
   1570 void LCodeGen::DoBitI(LBitI* instr) {
   1571   LOperand* left_op = instr->left();
   1572   LOperand* right_op = instr->right();
   1573   ASSERT(left_op->IsRegister());
   1574   Register left = ToRegister(left_op);
   1575   Register result = ToRegister(instr->result());
   1576   Operand right(no_reg);
   1577 
   1578   if (right_op->IsStackSlot()) {
   1579     right = Operand(EmitLoadRegister(right_op, at));
   1580   } else {
   1581     ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
   1582     right = ToOperand(right_op);
   1583   }
   1584 
   1585   switch (instr->op()) {
   1586     case Token::BIT_AND:
   1587       __ And(result, left, right);
   1588       break;
   1589     case Token::BIT_OR:
   1590       __ Or(result, left, right);
   1591       break;
   1592     case Token::BIT_XOR:
   1593       if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
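                // xor with ~0 is a bitwise NOT, which MIPS implements as nor
                // with zero.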
   1594         __ Nor(result, zero_reg, left);
   1595       } else {
   1596         __ Xor(result, left, right);
   1597       }
   1598       break;
   1599     default:
   1600       UNREACHABLE();
   1601       break;
   1602   }
   1603 }
   1604 
   1605 
   1606 void LCodeGen::DoShiftI(LShiftI* instr) {
   1607   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
   1608   // result may alias either of them.
   1609   LOperand* right_op = instr->right();
   1610   Register left = ToRegister(instr->left());
   1611   Register result = ToRegister(instr->result());
   1612   Register scratch = scratch0();
   1613 
   1614   if (right_op->IsRegister()) {
   1615     // No need to mask the right operand on MIPS, it is built into the variable
   1616     // shift instructions.
   1617     switch (instr->op()) {
   1618       case Token::ROR:
   1619         __ Ror(result, left, Operand(ToRegister(right_op)));
   1620         break;
   1621       case Token::SAR:
   1622         __ srav(result, left, ToRegister(right_op));
   1623         break;
   1624       case Token::SHR:
   1625         __ srlv(result, left, ToRegister(right_op));
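                // The result of a logical right shift is unsigned; if its sign
                // bit is set, the value is not representable as an int32, so
                // deoptimize.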
   1626         if (instr->can_deopt()) {
   1627           DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
   1628         }
   1629         break;
   1630       case Token::SHL:
   1631         __ sllv(result, left, ToRegister(right_op));
   1632         break;
   1633       default:
   1634         UNREACHABLE();
   1635         break;
   1636     }
   1637   } else {
   1638     // Mask the right_op operand.
   1639     int value = ToInteger32(LConstantOperand::cast(right_op));
   1640     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
   1641     switch (instr->op()) {
   1642       case Token::ROR:
   1643         if (shift_count != 0) {
   1644           __ Ror(result, left, Operand(shift_count));
   1645         } else {
   1646           __ Move(result, left);
   1647         }
   1648         break;
   1649       case Token::SAR:
   1650         if (shift_count != 0) {
   1651           __ sra(result, left, shift_count);
   1652         } else {
   1653           __ Move(result, left);
   1654         }
   1655         break;
   1656       case Token::SHR:
   1657         if (shift_count != 0) {
   1658           __ srl(result, left, shift_count);
   1659         } else {
   1660           if (instr->can_deopt()) {
   1661             __ And(at, left, Operand(0x80000000));
   1662             DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
   1663           }
   1664           __ Move(result, left);
   1665         }
   1666         break;
   1667       case Token::SHL:
   1668         if (shift_count != 0) {
   1669           if (instr->hydrogen_value()->representation().IsSmi() &&
   1670               instr->can_deopt()) {
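                    // A smi shift-left by n is an untagged shift by n - 1
                    // followed by an overflow-checked smi tag, which supplies
                    // the final shift by 1.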
   1671             if (shift_count != 1) {
   1672               __ sll(result, left, shift_count - 1);
   1673               __ SmiTagCheckOverflow(result, result, scratch);
   1674             } else {
   1675               __ SmiTagCheckOverflow(result, left, scratch);
   1676             }
   1677             DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
   1678           } else {
   1679             __ sll(result, left, shift_count);
   1680           }
   1681         } else {
   1682           __ Move(result, left);
   1683         }
   1684         break;
   1685       default:
   1686         UNREACHABLE();
   1687         break;
   1688     }
   1689   }
   1690 }
   1691 
   1692 
   1693 void LCodeGen::DoSubI(LSubI* instr) {
   1694   LOperand* left = instr->left();
   1695   LOperand* right = instr->right();
   1696   LOperand* result = instr->result();
   1697   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1698 
   1699   if (!can_overflow) {
   1700     if (right->IsStackSlot()) {
   1701       Register right_reg = EmitLoadRegister(right, at);
   1702       __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
   1703     } else {
   1704       ASSERT(right->IsRegister() || right->IsConstantOperand());
   1705       __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
   1706     }
   1707   } else {  // can_overflow.
   1708     Register overflow = scratch0();
   1709     Register scratch = scratch1();
   1710     if (right->IsStackSlot() || right->IsConstantOperand()) {
   1711       Register right_reg = EmitLoadRegister(right, scratch);
   1712       __ SubuAndCheckForOverflow(ToRegister(result),
   1713                                  ToRegister(left),
   1714                                  right_reg,
   1715                                  overflow);  // Register at is also used as scratch.
   1716     } else {
   1717       ASSERT(right->IsRegister());
   1718       // Because the overflow-check macros do not support constant operands,
   1719       // the IsConstantOperand case is handled by the previous clause.
   1720       __ SubuAndCheckForOverflow(ToRegister(result),
   1721                                  ToRegister(left),
   1722                                  ToRegister(right),
   1723                                  overflow);  // Register at is also used as scratch.
   1724     }
   1725     DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
   1726   }
   1727 }
   1728 
   1729 
   1730 void LCodeGen::DoConstantI(LConstantI* instr) {
   1731   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1732 }
   1733 
   1734 
   1735 void LCodeGen::DoConstantS(LConstantS* instr) {
   1736   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1737 }
   1738 
   1739 
   1740 void LCodeGen::DoConstantD(LConstantD* instr) {
   1741   ASSERT(instr->result()->IsDoubleRegister());
   1742   DoubleRegister result = ToDoubleRegister(instr->result());
   1743   double v = instr->value();
   1744   __ Move(result, v);
   1745 }
   1746 
   1747 
   1748 void LCodeGen::DoConstantE(LConstantE* instr) {
   1749   __ li(ToRegister(instr->result()), Operand(instr->value()));
   1750 }
   1751 
   1752 
   1753 void LCodeGen::DoConstantT(LConstantT* instr) {
   1754   Handle<Object> object = instr->value(isolate());
   1755   AllowDeferredHandleDereference smi_check;
   1756   __ li(ToRegister(instr->result()), object);
   1757 }
   1758 
   1759 
   1760 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
   1761   Register result = ToRegister(instr->result());
   1762   Register map = ToRegister(instr->value());
   1763   __ EnumLength(result, map);
   1764 }
   1765 
   1766 
   1767 void LCodeGen::DoDateField(LDateField* instr) {
   1768   Register object = ToRegister(instr->date());
   1769   Register result = ToRegister(instr->result());
   1770   Register scratch = ToRegister(instr->temp());
   1771   Smi* index = instr->index();
   1772   Label runtime, done;
   1773   ASSERT(object.is(a0));
   1774   ASSERT(result.is(v0));
   1775   ASSERT(!scratch.is(scratch0()));
   1776   ASSERT(!scratch.is(object));
   1777 
   1778   __ SmiTst(object, at);
   1779   DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
   1780   __ GetObjectType(object, scratch, scratch);
   1781   DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
   1782 
   1783   if (index->value() == 0) {
   1784     __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
   1785   } else {
   1786     if (index->value() < JSDate::kFirstUncachedField) {
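              // Cached date fields are valid only while the global date cache
              // stamp matches the stamp recorded in the object; otherwise fall
              // through to the runtime call.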
   1787       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
   1788       __ li(scratch, Operand(stamp));
   1789       __ lw(scratch, MemOperand(scratch));
   1790       __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
   1791       __ Branch(&runtime, ne, scratch, Operand(scratch0()));
   1792       __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
   1793                                             kPointerSize * index->value()));
   1794       __ jmp(&done);
   1795     }
   1796     __ bind(&runtime);
   1797     __ PrepareCallCFunction(2, scratch);
   1798     __ li(a1, Operand(index));
   1799     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
   1800     __ bind(&done);
   1801   }
   1802 }
   1803 
   1804 
   1805 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   1806                                            LOperand* index,
   1807                                            String::Encoding encoding) {
   1808   if (index->IsConstantOperand()) {
   1809     int offset = ToInteger32(LConstantOperand::cast(index));
   1810     if (encoding == String::TWO_BYTE_ENCODING) {
   1811       offset *= kUC16Size;
   1812     }
   1813     STATIC_ASSERT(kCharSize == 1);
   1814     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   1815   }
   1816   Register scratch = scratch0();
   1817   ASSERT(!scratch.is(string));
   1818   ASSERT(!scratch.is(ToRegister(index)));
   1819   if (encoding == String::ONE_BYTE_ENCODING) {
   1820     __ Addu(scratch, string, ToRegister(index));
   1821   } else {
   1822     STATIC_ASSERT(kUC16Size == 2);
   1823     __ sll(scratch, ToRegister(index), 1);
   1824     __ Addu(scratch, string, scratch);
   1825   }
   1826   return FieldMemOperand(scratch, SeqString::kHeaderSize);
   1827 }
   1828 
   1829 
   1830 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1831   String::Encoding encoding = instr->hydrogen()->encoding();
   1832   Register string = ToRegister(instr->string());
   1833   Register result = ToRegister(instr->result());
   1834 
   1835   if (FLAG_debug_code) {
   1836     Register scratch = scratch0();
   1837     __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   1838     __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1839 
   1840     __ And(scratch, scratch,
   1841            Operand(kStringRepresentationMask | kStringEncodingMask));
   1842     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1843     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1844     __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
   1845                                 ? one_byte_seq_type : two_byte_seq_type));
   1846     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
   1847   }
   1848 
   1849   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1850   if (encoding == String::ONE_BYTE_ENCODING) {
   1851     __ lbu(result, operand);
   1852   } else {
   1853     __ lhu(result, operand);
   1854   }
   1855 }
   1856 
   1857 
   1858 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   1859   String::Encoding encoding = instr->hydrogen()->encoding();
   1860   Register string = ToRegister(instr->string());
   1861   Register value = ToRegister(instr->value());
   1862 
   1863   if (FLAG_debug_code) {
   1864     Register scratch = scratch0();
   1865     Register index = ToRegister(instr->index());
   1866     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1867     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1868     int encoding_mask =
   1869         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   1870         ? one_byte_seq_type : two_byte_seq_type;
   1871     __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
   1872   }
   1873 
   1874   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1875   if (encoding == String::ONE_BYTE_ENCODING) {
   1876     __ sb(value, operand);
   1877   } else {
   1878     __ sh(value, operand);
   1879   }
   1880 }
   1881 
   1882 
   1883 void LCodeGen::DoAddI(LAddI* instr) {
   1884   LOperand* left = instr->left();
   1885   LOperand* right = instr->right();
   1886   LOperand* result = instr->result();
   1887   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1888 
   1889   if (!can_overflow) {
   1890     if (right->IsStackSlot()) {
   1891       Register right_reg = EmitLoadRegister(right, at);
   1892       __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
   1893     } else {
   1894       ASSERT(right->IsRegister() || right->IsConstantOperand());
   1895       __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
   1896     }
   1897   } else {  // can_overflow.
   1898     Register overflow = scratch0();
   1899     Register scratch = scratch1();
   1900     if (right->IsStackSlot() ||
   1901         right->IsConstantOperand()) {
   1902       Register right_reg = EmitLoadRegister(right, scratch);
   1903       __ AdduAndCheckForOverflow(ToRegister(result),
   1904                                  ToRegister(left),
   1905                                  right_reg,
   1906                                  overflow);  // Register at is also used as scratch.
   1907     } else {
   1908       ASSERT(right->IsRegister());
   1909       // Because the overflow-check macros do not support constant operands,
   1910       // the IsConstantOperand case is handled by the previous clause.
   1911       __ AdduAndCheckForOverflow(ToRegister(result),
   1912                                  ToRegister(left),
   1913                                  ToRegister(right),
   1914                                  overflow);  // Register at is also used as scratch.
   1915     }
   1916     DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
   1917   }
   1918 }
   1919 
   1920 
   1921 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   1922   LOperand* left = instr->left();
   1923   LOperand* right = instr->right();
   1924   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   1925   Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
   1926   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   1927     Register left_reg = ToRegister(left);
   1928     Register right_reg = EmitLoadRegister(right, scratch0());
   1929     Register result_reg = ToRegister(instr->result());
   1930     Label return_right, done;
   1931     Register scratch = scratch1();
   1932     __ Slt(scratch, left_reg, Operand(right_reg));
   1933     if (condition == ge) {
   1934       __ Movz(result_reg, left_reg, scratch);
   1935       __ Movn(result_reg, right_reg, scratch);
   1936     } else {
   1937       ASSERT(condition == le);
   1938       __ Movn(result_reg, left_reg, scratch);
   1939       __ Movz(result_reg, right_reg, scratch);
   1940     }
   1941   } else {
   1942     ASSERT(instr->hydrogen()->representation().IsDouble());
   1943     FPURegister left_reg = ToDoubleRegister(left);
   1944     FPURegister right_reg = ToDoubleRegister(right);
   1945     FPURegister result_reg = ToDoubleRegister(instr->result());
   1946     Label check_nan_left, check_zero, return_left, return_right, done;
   1947     __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
   1948     __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
   1949     __ Branch(&return_right);
   1950 
   1951     __ bind(&check_zero);
   1952     // left == right != 0.
   1953     __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
   1954     // At this point, both left and right are either 0 or -0.
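            // For min, compute -((-left) - right) so that min(+0, -0) is -0;
            // for max, plain addition already returns +0.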
   1955     if (operation == HMathMinMax::kMathMin) {
   1956       __ neg_d(left_reg, left_reg);
   1957       __ sub_d(result_reg, left_reg, right_reg);
   1958       __ neg_d(result_reg, result_reg);
   1959     } else {
   1960       __ add_d(result_reg, left_reg, right_reg);
   1961     }
   1962     __ Branch(&done);
   1963 
   1964     __ bind(&check_nan_left);
   1965     // left == NaN.
   1966     __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
   1967     __ bind(&return_right);
   1968     if (!right_reg.is(result_reg)) {
   1969       __ mov_d(result_reg, right_reg);
   1970     }
   1971     __ Branch(&done);
   1972 
   1973     __ bind(&return_left);
   1974     if (!left_reg.is(result_reg)) {
   1975       __ mov_d(result_reg, left_reg);
   1976     }
   1977     __ bind(&done);
   1978   }
   1979 }
   1980 
   1981 
   1982 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1983   DoubleRegister left = ToDoubleRegister(instr->left());
   1984   DoubleRegister right = ToDoubleRegister(instr->right());
   1985   DoubleRegister result = ToDoubleRegister(instr->result());
   1986   switch (instr->op()) {
   1987     case Token::ADD:
   1988       __ add_d(result, left, right);
   1989       break;
   1990     case Token::SUB:
   1991       __ sub_d(result, left, right);
   1992       break;
   1993     case Token::MUL:
   1994       __ mul_d(result, left, right);
   1995       break;
   1996     case Token::DIV:
   1997       __ div_d(result, left, right);
   1998       break;
   1999     case Token::MOD: {
   2000       // Save a0-a3 on the stack.
   2001       RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
   2002       __ MultiPush(saved_regs);
   2003 
   2004       __ PrepareCallCFunction(0, 2, scratch0());
   2005       __ MovToFloatParameters(left, right);
   2006       __ CallCFunction(
   2007           ExternalReference::mod_two_doubles_operation(isolate()),
   2008           0, 2);
   2009       // Move the result into the double result register.
   2010       __ MovFromFloatResult(result);
   2011 
   2012       // Restore the saved registers.
   2013       __ MultiPop(saved_regs);
   2014       break;
   2015     }
   2016     default:
   2017       UNREACHABLE();
   2018       break;
   2019   }
   2020 }
   2021 
   2022 
   2023 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   2024   ASSERT(ToRegister(instr->context()).is(cp));
   2025   ASSERT(ToRegister(instr->left()).is(a1));
   2026   ASSERT(ToRegister(instr->right()).is(a0));
   2027   ASSERT(ToRegister(instr->result()).is(v0));
   2028 
   2029   BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
   2030   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2031   // Other architectures use a nop here to signal that there is no inlined
   2032   // patchable code. MIPS does not need the nop, since our marker
   2033   // instruction (andi zero_reg) never appears in normal code.
   2034 }
   2035 
   2036 
   2037 template<class InstrType>
   2038 void LCodeGen::EmitBranch(InstrType instr,
   2039                           Condition condition,
   2040                           Register src1,
   2041                           const Operand& src2) {
   2042   int left_block = instr->TrueDestination(chunk_);
   2043   int right_block = instr->FalseDestination(chunk_);
   2044 
   2045   int next_block = GetNextEmittedBlock();
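          // Fall through instead of branching whenever the target block is the
          // next one to be emitted.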
   2046   if (right_block == left_block || condition == al) {
   2047     EmitGoto(left_block);
   2048   } else if (left_block == next_block) {
   2049     __ Branch(chunk_->GetAssemblyLabel(right_block),
   2050               NegateCondition(condition), src1, src2);
   2051   } else if (right_block == next_block) {
   2052     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
   2053   } else {
   2054     __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
   2055     __ Branch(chunk_->GetAssemblyLabel(right_block));
   2056   }
   2057 }
   2058 
   2059 
   2060 template<class InstrType>
   2061 void LCodeGen::EmitBranchF(InstrType instr,
   2062                            Condition condition,
   2063                            FPURegister src1,
   2064                            FPURegister src2) {
   2065   int right_block = instr->FalseDestination(chunk_);
   2066   int left_block = instr->TrueDestination(chunk_);
   2067 
   2068   int next_block = GetNextEmittedBlock();
   2069   if (right_block == left_block) {
   2070     EmitGoto(left_block);
   2071   } else if (left_block == next_block) {
   2072     __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
   2073                NegateCondition(condition), src1, src2);
   2074   } else if (right_block == next_block) {
   2075     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
   2076                condition, src1, src2);
   2077   } else {
   2078     __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
   2079                condition, src1, src2);
   2080     __ Branch(chunk_->GetAssemblyLabel(right_block));
   2081   }
   2082 }
   2083 
   2084 
   2085 template<class InstrType>
   2086 void LCodeGen::EmitFalseBranch(InstrType instr,
   2087                                Condition condition,
   2088                                Register src1,
   2089                                const Operand& src2) {
   2090   int false_block = instr->FalseDestination(chunk_);
   2091   __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
   2092 }
   2093 
   2094 
   2095 template<class InstrType>
   2096 void LCodeGen::EmitFalseBranchF(InstrType instr,
   2097                                 Condition condition,
   2098                                 FPURegister src1,
   2099                                 FPURegister src2) {
   2100   int false_block = instr->FalseDestination(chunk_);
   2101   __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
   2102              condition, src1, src2);
   2103 }
   2104 
   2105 
   2106 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   2107   __ stop("LDebugBreak");
   2108 }
   2109 
   2110 
   2111 void LCodeGen::DoBranch(LBranch* instr) {
   2112   Representation r = instr->hydrogen()->value()->representation();
   2113   if (r.IsInteger32() || r.IsSmi()) {
   2114     ASSERT(!info()->IsStub());
   2115     Register reg = ToRegister(instr->value());
   2116     EmitBranch(instr, ne, reg, Operand(zero_reg));
   2117   } else if (r.IsDouble()) {
   2118     ASSERT(!info()->IsStub());
   2119     DoubleRegister reg = ToDoubleRegister(instr->value());
   2120     // Test the double value. Zero and NaN are false.
   2121     EmitBranchF(instr, nue, reg, kDoubleRegZero);
   2122   } else {
   2123     ASSERT(r.IsTagged());
   2124     Register reg = ToRegister(instr->value());
   2125     HType type = instr->hydrogen()->value()->type();
   2126     if (type.IsBoolean()) {
   2127       ASSERT(!info()->IsStub());
   2128       __ LoadRoot(at, Heap::kTrueValueRootIndex);
   2129       EmitBranch(instr, eq, reg, Operand(at));
   2130     } else if (type.IsSmi()) {
   2131       ASSERT(!info()->IsStub());
   2132       EmitBranch(instr, ne, reg, Operand(zero_reg));
   2133     } else if (type.IsJSArray()) {
   2134       ASSERT(!info()->IsStub());
   2135       EmitBranch(instr, al, zero_reg, Operand(zero_reg));
   2136     } else if (type.IsHeapNumber()) {
   2137       ASSERT(!info()->IsStub());
   2138       DoubleRegister dbl_scratch = double_scratch0();
   2139       __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2140       // Test the double value. Zero and NaN are false.
   2141       EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
   2142     } else if (type.IsString()) {
   2143       ASSERT(!info()->IsStub());
   2144       __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
   2145       EmitBranch(instr, ne, at, Operand(zero_reg));
   2146     } else {
   2147       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
   2148       // Avoid deopts in the case where we've never executed this path before.
   2149       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
   2150 
   2151       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
   2152         // undefined -> false.
   2153         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   2154         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2155       }
   2156       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
   2157         // Boolean -> its value.
   2158         __ LoadRoot(at, Heap::kTrueValueRootIndex);
   2159         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
   2160         __ LoadRoot(at, Heap::kFalseValueRootIndex);
   2161         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2162       }
   2163       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
   2164         // 'null' -> false.
   2165         __ LoadRoot(at, Heap::kNullValueRootIndex);
   2166         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
   2167       }
   2168 
   2169       if (expected.Contains(ToBooleanStub::SMI)) {
   2170         // Smis: 0 -> false, all others -> true.
   2171         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
   2172         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2173       } else if (expected.NeedsMap()) {
   2174         // If we need a map later and have a Smi -> deopt.
   2175         __ SmiTst(reg, at);
   2176         DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
   2177       }
   2178 
   2179       const Register map = scratch0();
   2180       if (expected.NeedsMap()) {
   2181         __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
   2182         if (expected.CanBeUndetectable()) {
   2183           // Undetectable -> false.
   2184           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
   2185           __ And(at, at, Operand(1 << Map::kIsUndetectable));
   2186           __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
   2187         }
   2188       }
   2189 
   2190       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
   2191         // spec object -> true.
   2192         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2193         __ Branch(instr->TrueLabel(chunk_),
   2194                   ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
   2195       }
   2196 
   2197       if (expected.Contains(ToBooleanStub::STRING)) {
   2198         // String value -> false iff empty.
   2199         Label not_string;
   2200         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2201         __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
   2202         __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
   2203         __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
   2204         __ Branch(instr->FalseLabel(chunk_));
   2205         __ bind(&not_string);
   2206       }
   2207 
   2208       if (expected.Contains(ToBooleanStub::SYMBOL)) {
   2209         // Symbol value -> true.
   2210         const Register scratch = scratch1();
   2211         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2212         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
   2213       }
   2214 
   2215       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
   2216         // heap number -> false iff +0, -0, or NaN.
   2217         DoubleRegister dbl_scratch = double_scratch0();
   2218         Label not_heap_number;
   2219         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   2220         __ Branch(&not_heap_number, ne, map, Operand(at));
   2221         __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2222         __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2223                    ne, dbl_scratch, kDoubleRegZero);
   2224         // Falls through if dbl_scratch == 0.
   2225         __ Branch(instr->FalseLabel(chunk_));
   2226         __ bind(&not_heap_number);
   2227       }
   2228 
   2229       if (!expected.IsGeneric()) {
   2230         // We've seen something for the first time -> deopt.
   2231         // This can only happen if we are not generic already.
   2232         DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
   2233       }
   2234     }
   2235   }
   2236 }
   2237 
   2238 
   2239 void LCodeGen::EmitGoto(int block) {
   2240   if (!IsNextEmittedBlock(block)) {
   2241     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2242   }
   2243 }
   2244 
   2245 
   2246 void LCodeGen::DoGoto(LGoto* instr) {
   2247   EmitGoto(instr->block_id());
   2248 }
   2249 
   2250 
   2251 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2252   Condition cond = kNoCondition;
   2253   switch (op) {
   2254     case Token::EQ:
   2255     case Token::EQ_STRICT:
   2256       cond = eq;
   2257       break;
   2258     case Token::NE:
   2259     case Token::NE_STRICT:
   2260       cond = ne;
   2261       break;
   2262     case Token::LT:
   2263       cond = is_unsigned ? lo : lt;
   2264       break;
   2265     case Token::GT:
   2266       cond = is_unsigned ? hi : gt;
   2267       break;
   2268     case Token::LTE:
   2269       cond = is_unsigned ? ls : le;
   2270       break;
   2271     case Token::GTE:
   2272       cond = is_unsigned ? hs : ge;
   2273       break;
   2274     case Token::IN:
   2275     case Token::INSTANCEOF:
   2276     default:
   2277       UNREACHABLE();
   2278   }
   2279   return cond;
   2280 }
   2281 
   2282 
   2283 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2284   LOperand* left = instr->left();
   2285   LOperand* right = instr->right();
   2286   bool is_unsigned =
   2287       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2288       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2289   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2290 
   2291   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2292     // We can statically evaluate the comparison.
   2293     double left_val = ToDouble(LConstantOperand::cast(left));
   2294     double right_val = ToDouble(LConstantOperand::cast(right));
   2295     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
   2296         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
   2297     EmitGoto(next_block);
   2298   } else {
   2299     if (instr->is_double()) {
   2300       // Compare left and right as doubles and load the
   2301       // resulting flags into the normal status register.
   2302       FPURegister left_reg = ToDoubleRegister(left);
   2303       FPURegister right_reg = ToDoubleRegister(right);
   2304 
   2305       // If a NaN is involved, i.e. the result is unordered,
   2306       // jump to the false block label.
   2307       __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
   2308                  left_reg, right_reg);
   2309 
   2310       EmitBranchF(instr, cond, left_reg, right_reg);
   2311     } else {
   2312       Register cmp_left;
   2313       Operand cmp_right = Operand(0);
   2314 
   2315       if (right->IsConstantOperand()) {
   2316         int32_t value = ToInteger32(LConstantOperand::cast(right));
   2317         if (instr->hydrogen_value()->representation().IsSmi()) {
   2318           cmp_left = ToRegister(left);
   2319           cmp_right = Operand(Smi::FromInt(value));
   2320         } else {
   2321           cmp_left = ToRegister(left);
   2322           cmp_right = Operand(value);
   2323         }
   2324       } else if (left->IsConstantOperand()) {
   2325         int32_t value = ToInteger32(LConstantOperand::cast(left));
   2326         if (instr->hydrogen_value()->representation().IsSmi()) {
   2327           cmp_left = ToRegister(right);
   2328           cmp_right = Operand(Smi::FromInt(value));
   2329         } else {
   2330           cmp_left = ToRegister(right);
   2331           cmp_right = Operand(value);
   2332         }
   2333         // We commuted the operands, so commute the condition.
   2334         cond = CommuteCondition(cond);
   2335       } else {
   2336         cmp_left = ToRegister(left);
   2337         cmp_right = Operand(ToRegister(right));
   2338       }
   2339 
   2340       EmitBranch(instr, cond, cmp_left, cmp_right);
   2341     }
   2342   }
   2343 }
   2344 
   2345 
   2346 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2347   Register left = ToRegister(instr->left());
   2348   Register right = ToRegister(instr->right());
   2349 
   2350   EmitBranch(instr, eq, left, Operand(right));
   2351 }
   2352 
   2353 
   2354 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2355   if (instr->hydrogen()->representation().IsTagged()) {
   2356     Register input_reg = ToRegister(instr->object());
   2357     __ li(at, Operand(factory()->the_hole_value()));
   2358     EmitBranch(instr, eq, input_reg, Operand(at));
   2359     return;
   2360   }
   2361 
   2362   DoubleRegister input_reg = ToDoubleRegister(instr->object());
   2363   EmitFalseBranchF(instr, eq, input_reg, input_reg);
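          // Only a NaN (input != input) can be the hole, which is encoded as a
          // NaN with a distinguished upper word.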
   2364 
   2365   Register scratch = scratch0();
   2366   __ FmoveHigh(scratch, input_reg);
   2367   EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
   2368 }
   2369 
   2370 
   2371 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   2372   Representation rep = instr->hydrogen()->value()->representation();
   2373   ASSERT(!rep.IsInteger32());
   2374   Register scratch = ToRegister(instr->temp());
   2375 
   2376   if (rep.IsDouble()) {
   2377     DoubleRegister value = ToDoubleRegister(instr->value());
   2378     EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
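            // value compares equal to zero here, so it is +0 or -0; only -0 has
            // the sign bit (0x80000000) set in its upper word.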
   2379     __ FmoveHigh(scratch, value);
   2380     __ li(at, 0x80000000);
   2381   } else {
   2382     Register value = ToRegister(instr->value());
   2383     __ CheckMap(value,
   2384                 scratch,
   2385                 Heap::kHeapNumberMapRootIndex,
   2386                 instr->FalseLabel(chunk()),
   2387                 DO_SMI_CHECK);
   2388     __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
   2389     EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
   2390     __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
   2391     __ mov(at, zero_reg);
   2392   }
   2393   EmitBranch(instr, eq, scratch, Operand(at));
   2394 }
   2395 
   2396 
   2397 Condition LCodeGen::EmitIsObject(Register input,
   2398                                  Register temp1,
   2399                                  Register temp2,
   2400                                  Label* is_not_object,
   2401                                  Label* is_object) {
   2402   __ JumpIfSmi(input, is_not_object);
   2403 
   2404   __ LoadRoot(temp2, Heap::kNullValueRootIndex);
   2405   __ Branch(is_object, eq, input, Operand(temp2));
   2406 
   2407   // Load map.
   2408   __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
   2409   // Undetectable objects behave like undefined.
   2410   __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
   2411   __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
   2412   __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
   2413 
   2414   // Load instance type and check that it is in object type range.
   2415   __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
   2416   __ Branch(is_not_object,
   2417             lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
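          // The caller completes the range check by testing the returned le
          // condition with temp2 against LAST_NONCALLABLE_SPEC_OBJECT_TYPE.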
   2418 
   2419   return le;
   2420 }
   2421 
   2422 
   2423 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   2424   Register reg = ToRegister(instr->value());
   2425   Register temp1 = ToRegister(instr->temp());
   2426   Register temp2 = scratch0();
   2427 
   2428   Condition true_cond =
   2429       EmitIsObject(reg, temp1, temp2,
   2430           instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
   2431 
   2432   EmitBranch(instr, true_cond, temp2,
   2433              Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2434 }
   2435 
   2436 
   2437 Condition LCodeGen::EmitIsString(Register input,
   2438                                  Register temp1,
   2439                                  Label* is_not_string,
   2440                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2441   if (check_needed == INLINE_SMI_CHECK) {
   2442     __ JumpIfSmi(input, is_not_string);
   2443   }
   2444   __ GetObjectType(input, temp1, temp1);
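          // The caller tests the returned lt condition with temp1 against
          // FIRST_NONSTRING_TYPE.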
   2445 
   2446   return lt;
   2447 }
   2448 
   2449 
   2450 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2451   Register reg = ToRegister(instr->value());
   2452   Register temp1 = ToRegister(instr->temp());
   2453 
   2454   SmiCheck check_needed =
   2455       instr->hydrogen()->value()->type().IsHeapObject()
   2456           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2457   Condition true_cond =
   2458       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
   2459 
   2460   EmitBranch(instr, true_cond, temp1,
   2461              Operand(FIRST_NONSTRING_TYPE));
   2462 }
   2463 
   2464 
   2465 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2466   Register input_reg = EmitLoadRegister(instr->value(), at);
   2467   __ And(at, input_reg, kSmiTagMask);
   2468   EmitBranch(instr, eq, at, Operand(zero_reg));
   2469 }
   2470 
   2471 
   2472 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2473   Register input = ToRegister(instr->value());
   2474   Register temp = ToRegister(instr->temp());
   2475 
   2476   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2477     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2478   }
   2479   __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2480   __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2481   __ And(at, temp, Operand(1 << Map::kIsUndetectable));
   2482   EmitBranch(instr, ne, at, Operand(zero_reg));
   2483 }
   2484 
   2485 
   2486 static Condition ComputeCompareCondition(Token::Value op) {
   2487   switch (op) {
   2488     case Token::EQ_STRICT:
   2489     case Token::EQ:
   2490       return eq;
   2491     case Token::LT:
   2492       return lt;
   2493     case Token::GT:
   2494       return gt;
   2495     case Token::LTE:
   2496       return le;
   2497     case Token::GTE:
   2498       return ge;
   2499     default:
   2500       UNREACHABLE();
   2501       return kNoCondition;
   2502   }
   2503 }
   2504 
   2505 
   2506 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2507   ASSERT(ToRegister(instr->context()).is(cp));
   2508   Token::Value op = instr->op();
   2509 
   2510   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2511   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2512 
   2513   Condition condition = ComputeCompareCondition(op);
   2514 
   2515   EmitBranch(instr, condition, v0, Operand(zero_reg));
   2516 }
   2517 
   2518 
   2519 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2520   InstanceType from = instr->from();
   2521   InstanceType to = instr->to();
   2522   if (from == FIRST_TYPE) return to;
   2523   ASSERT(from == to || to == LAST_TYPE);
   2524   return from;
   2525 }
   2526 
   2527 
   2528 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2529   InstanceType from = instr->from();
   2530   InstanceType to = instr->to();
   2531   if (from == to) return eq;
   2532   if (to == LAST_TYPE) return hs;
   2533   if (from == FIRST_TYPE) return ls;
   2534   UNREACHABLE();
   2535   return eq;
   2536 }
   2537 
   2538 
   2539 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2540   Register scratch = scratch0();
   2541   Register input = ToRegister(instr->value());
   2542 
   2543   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2544     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2545   }
   2546 
   2547   __ GetObjectType(input, scratch, scratch);
   2548   EmitBranch(instr,
   2549              BranchCondition(instr->hydrogen()),
   2550              scratch,
   2551              Operand(TestType(instr->hydrogen())));
   2552 }
   2553 
   2554 
   2555 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
   2556   Register input = ToRegister(instr->value());
   2557   Register result = ToRegister(instr->result());
   2558 
   2559   __ AssertString(input);
   2560 
   2561   __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
   2562   __ IndexFromHash(result, result);
   2563 }
   2564 
   2565 
   2566 void LCodeGen::DoHasCachedArrayIndexAndBranch(
   2567     LHasCachedArrayIndexAndBranch* instr) {
   2568   Register input = ToRegister(instr->value());
   2569   Register scratch = scratch0();
   2570 
   2571   __ lw(scratch,
   2572          FieldMemOperand(input, String::kHashFieldOffset));
   2573   __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
   2574   EmitBranch(instr, eq, at, Operand(zero_reg));
   2575 }
   2576 
   2577 
   2578 // Branches to a label or falls through with the answer in the temp register
   2579 // for the caller to compare. Trashes the temp registers, but not the input.
   2580 void LCodeGen::EmitClassOfTest(Label* is_true,
   2581                                Label* is_false,
   2582                                Handle<String> class_name,
   2583                                Register input,
   2584                                Register temp,
   2585                                Register temp2) {
   2586   ASSERT(!input.is(temp));
   2587   ASSERT(!input.is(temp2));
   2588   ASSERT(!temp.is(temp2));
   2589 
   2590   __ JumpIfSmi(input, is_false);
   2591 
   2592   if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
   2593     // Assuming the following assertions, we can use the same compares to test
   2594     // for both being a function type and being in the object type range.
   2595     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   2596     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2597                   FIRST_SPEC_OBJECT_TYPE + 1);
   2598     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2599                   LAST_SPEC_OBJECT_TYPE - 1);
   2600     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
   2601 
   2602     __ GetObjectType(input, temp, temp2);
   2603     __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
   2604     __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
   2605     __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
   2606   } else {
   2607     // Faster code path to avoid two compares: subtract lower bound from the
   2608     // actual type and do a signed compare with the width of the type range.
   2609     __ GetObjectType(input, temp, temp2);
   2610     __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2611     __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
   2612                                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2613   }
   2614 
   2615   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   2616   // Check if the constructor in the map is a function.
   2617   __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
   2618 
   2619   // Objects with a non-function constructor have class 'Object'.
   2620   __ GetObjectType(temp, temp2, temp2);
   2621   if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
   2622     __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
   2623   } else {
   2624     __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
   2625   }
   2626 
   2627   // temp now contains the constructor function. Grab the
   2628   // instance class name from there.
   2629   __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2630   __ lw(temp, FieldMemOperand(temp,
   2631                                SharedFunctionInfo::kInstanceClassNameOffset));
   2632   // The class name we are testing against is internalized since it's a literal.
   2633   // The name in the constructor is internalized because of the way the context
   2634   // is booted.  This routine isn't expected to work for random API-created
   2635   // classes and it doesn't have to because you can't access it with natives
   2636   // syntax.  Since both sides are internalized it is sufficient to use an
   2637   // identity comparison.
   2638 
   2639   // End with the address of this class_name instance in the temp register.
   2640   // On MIPS, the caller must do the comparison with Handle<String> class_name.
   2641 }
   2642 
   2643 
   2644 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2645   Register input = ToRegister(instr->value());
   2646   Register temp = scratch0();
   2647   Register temp2 = ToRegister(instr->temp());
   2648   Handle<String> class_name = instr->hydrogen()->class_name();
   2649 
   2650   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2651                   class_name, input, temp, temp2);
   2652 
   2653   EmitBranch(instr, eq, temp, Operand(class_name));
   2654 }
   2655 
   2656 
   2657 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2658   Register reg = ToRegister(instr->value());
   2659   Register temp = ToRegister(instr->temp());
   2660 
   2661   __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   2662   EmitBranch(instr, eq, temp, Operand(instr->map()));
   2663 }
   2664 
   2665 
   2666 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   2667   ASSERT(ToRegister(instr->context()).is(cp));
   2668   Label true_label, done;
   2669   ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
   2670   ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
   2671   Register result = ToRegister(instr->result());
   2672   ASSERT(result.is(v0));
   2673 
   2674   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   2675   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2676 
   2677   __ Branch(&true_label, eq, result, Operand(zero_reg));
   2678   __ li(result, Operand(factory()->false_value()));
   2679   __ Branch(&done);
   2680   __ bind(&true_label);
   2681   __ li(result, Operand(factory()->true_value()));
   2682   __ bind(&done);
   2683 }
   2684 
   2685 
   2686 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   2687   class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   2688    public:
   2689     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
   2690                                   LInstanceOfKnownGlobal* instr)
   2691         : LDeferredCode(codegen), instr_(instr) { }
   2692     virtual void Generate() V8_OVERRIDE {
   2693       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
   2694     }
   2695     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   2696     Label* map_check() { return &map_check_; }
   2697 
   2698    private:
   2699     LInstanceOfKnownGlobal* instr_;
   2700     Label map_check_;
   2701   };
   2702 
   2703   DeferredInstanceOfKnownGlobal* deferred;
   2704   deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
   2705 
   2706   Label done, false_result;
   2707   Register object = ToRegister(instr->value());
   2708   Register temp = ToRegister(instr->temp());
   2709   Register result = ToRegister(instr->result());
   2710 
   2711   ASSERT(object.is(a0));
   2712   ASSERT(result.is(v0));
   2713 
    2714   // A Smi is not an instance of anything.
   2715   __ JumpIfSmi(object, &false_result);
   2716 
    2717   // This is the inlined call site instanceof cache. The two occurrences of the
   2718   // hole value will be patched to the last map/result pair generated by the
   2719   // instanceof stub.
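           // Concretely: the map half of the pair lives in a Cell whose value is
           // rewritten from the hole to the cached map, while the true/false half
           // is a CONSTANT_SIZE li whose immediate is patched in the code itself.
           // A repeated check against the cached map then skips the stub call.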
   2720   Label cache_miss;
   2721   Register map = temp;
   2722   __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
   2723 
   2724   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   2725   __ bind(deferred->map_check());  // Label for calculating code patching.
   2726   // We use Factory::the_hole_value() on purpose instead of loading from the
    2727   // root array, to force relocation so that we can later patch in
    2728   // the cached map.
   2729   Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
   2730   __ li(at, Operand(Handle<Object>(cell)));
   2731   __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
   2732   __ BranchShort(&cache_miss, ne, map, Operand(at));
   2733   // We use Factory::the_hole_value() on purpose instead of loading from the
    2734   // root array, to force relocation so that we can later patch in true or
    2735   // false. The distance from the map check has to be constant.
   2736   __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
   2737   __ Branch(&done);
   2738 
   2739   // The inlined call site cache did not match. Check null and string before
   2740   // calling the deferred code.
   2741   __ bind(&cache_miss);
    2742   // Null is not an instance of anything.
   2743   __ LoadRoot(temp, Heap::kNullValueRootIndex);
   2744   __ Branch(&false_result, eq, object, Operand(temp));
   2745 
    2746   // String values are not instances of anything.
   2747   Condition cc = __ IsObjectStringType(object, temp, temp);
   2748   __ Branch(&false_result, cc, temp, Operand(zero_reg));
   2749 
   2750   // Go to the deferred code.
   2751   __ Branch(deferred->entry());
   2752 
   2753   __ bind(&false_result);
   2754   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   2755 
   2756   // Here result has either true or false. Deferred code also produces true or
   2757   // false object.
   2758   __ bind(deferred->exit());
   2759   __ bind(&done);
   2760 }
   2761 
   2762 
   2763 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   2764                                                Label* map_check) {
   2765   Register result = ToRegister(instr->result());
   2766   ASSERT(result.is(v0));
   2767 
   2768   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   2769   flags = static_cast<InstanceofStub::Flags>(
   2770       flags | InstanceofStub::kArgsInRegisters);
   2771   flags = static_cast<InstanceofStub::Flags>(
   2772       flags | InstanceofStub::kCallSiteInlineCheck);
   2773   flags = static_cast<InstanceofStub::Flags>(
   2774       flags | InstanceofStub::kReturnTrueFalseObject);
   2775   InstanceofStub stub(isolate(), flags);
   2776 
   2777   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   2778   LoadContextFromDeferred(instr->context());
   2779 
    2780   // Get the temp register reserved by the instruction. This needs to be t0
    2781   // because its slot in the pushed safepoint register area is used to
    2782   // communicate the offset to the location of the map check.
   2783   Register temp = ToRegister(instr->temp());
   2784   ASSERT(temp.is(t0));
   2785   __ li(InstanceofStub::right(), instr->function());
   2786   static const int kAdditionalDelta = 7;
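           // kAdditionalDelta is assumed to cover the instructions emitted between
           // before_push_delta and the return address of the stub call (the
           // CONSTANT_SIZE li, the safepoint slot store and the call sequence);
           // it must be kept in sync with the code below.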
   2787   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   2788   Label before_push_delta;
   2789   __ bind(&before_push_delta);
   2790   {
   2791     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   2792     __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
   2793     __ StoreToSafepointRegisterSlot(temp, temp);
   2794   }
   2795   CallCodeGeneric(stub.GetCode(),
   2796                   RelocInfo::CODE_TARGET,
   2797                   instr,
   2798                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   2799   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   2800   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2801   // Put the result value into the result register slot and
   2802   // restore all registers.
   2803   __ StoreToSafepointRegisterSlot(result, result);
   2804 }
   2805 
   2806 
   2807 void LCodeGen::DoCmpT(LCmpT* instr) {
   2808   ASSERT(ToRegister(instr->context()).is(cp));
   2809   Token::Value op = instr->op();
   2810 
   2811   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2812   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2813   // On MIPS there is no need for a "no inlined smi code" marker (nop).
   2814 
   2815   Condition condition = ComputeCompareCondition(op);
   2816   // A minor optimization that relies on LoadRoot always emitting one
   2817   // instruction.
   2818   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
   2819   Label done, check;
   2820   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
   2821   __ bind(&check);
   2822   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   2823   ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
   2824   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   2825   __ bind(&done);
   2826 }
   2827 
   2828 
   2829 void LCodeGen::DoReturn(LReturn* instr) {
   2830   if (FLAG_trace && info()->IsOptimizing()) {
   2831     // Push the return value on the stack as the parameter.
   2832     // Runtime::TraceExit returns its parameter in v0. We're leaving the code
    2833   // managed by the register allocator and tearing down the frame, so it's
   2834     // safe to write to the context register.
   2835     __ push(v0);
   2836     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2837     __ CallRuntime(Runtime::kTraceExit, 1);
   2838   }
   2839   if (info()->saves_caller_doubles()) {
   2840     RestoreCallerDoubles();
   2841   }
   2842   int no_frame_start = -1;
   2843   if (NeedsEagerFrame()) {
   2844     __ mov(sp, fp);
   2845     no_frame_start = masm_->pc_offset();
   2846     __ Pop(ra, fp);
   2847   }
   2848   if (instr->has_constant_parameter_count()) {
   2849     int parameter_count = ToInteger32(instr->constant_parameter_count());
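             // The +1 accounts for the receiver, which sits on the stack below
             // the parameters.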
   2850     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
   2851     if (sp_delta != 0) {
   2852       __ Addu(sp, sp, Operand(sp_delta));
   2853     }
   2854   } else {
   2855     Register reg = ToRegister(instr->parameter_count());
    2856     // The argument count parameter is a smi.
   2857     __ SmiUntag(reg);
   2858     __ sll(at, reg, kPointerSizeLog2);
   2859     __ Addu(sp, sp, at);
   2860   }
   2861 
   2862   __ Jump(ra);
   2863 
   2864   if (no_frame_start != -1) {
   2865     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   2866   }
   2867 }
   2868 
   2869 
   2870 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   2871   Register result = ToRegister(instr->result());
   2872   __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   2873   __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
   2874   if (instr->hydrogen()->RequiresHoleCheck()) {
   2875     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2876     DeoptimizeIf(eq, instr->environment(), result, Operand(at));
   2877   }
   2878 }
   2879 
   2880 
   2881 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   2882   ASSERT(ToRegister(instr->context()).is(cp));
   2883   ASSERT(ToRegister(instr->global_object()).is(a0));
   2884   ASSERT(ToRegister(instr->result()).is(v0));
   2885 
   2886   __ li(a2, Operand(instr->name()));
   2887   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
   2888   Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
   2889   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2890 }
   2891 
   2892 
   2893 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   2894   Register value = ToRegister(instr->value());
   2895   Register cell = scratch0();
   2896 
   2897   // Load the cell.
   2898   __ li(cell, Operand(instr->hydrogen()->cell().handle()));
   2899 
   2900   // If the cell we are storing to contains the hole it could have
   2901   // been deleted from the property dictionary. In that case, we need
   2902   // to update the property details in the property dictionary to mark
   2903   // it as no longer deleted.
   2904   if (instr->hydrogen()->RequiresHoleCheck()) {
   2905     // We use a temp to check the payload.
   2906     Register payload = ToRegister(instr->temp());
   2907     __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
   2908     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2909     DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
   2910   }
   2911 
   2912   // Store the value.
   2913   __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
   2914   // Cells are always rescanned, so no write barrier here.
   2915 }
   2916 
   2917 
   2919 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2920   Register context = ToRegister(instr->context());
   2921   Register result = ToRegister(instr->result());
   2922 
   2923   __ lw(result, ContextOperand(context, instr->slot_index()));
   2924   if (instr->hydrogen()->RequiresHoleCheck()) {
   2925     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2926 
   2927     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2928       DeoptimizeIf(eq, instr->environment(), result, Operand(at));
   2929     } else {
   2930       Label is_not_hole;
   2931       __ Branch(&is_not_hole, ne, result, Operand(at));
   2932       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2933       __ bind(&is_not_hole);
   2934     }
   2935   }
   2936 }
   2937 
   2938 
   2939 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2940   Register context = ToRegister(instr->context());
   2941   Register value = ToRegister(instr->value());
   2942   Register scratch = scratch0();
   2943   MemOperand target = ContextOperand(context, instr->slot_index());
   2944 
   2945   Label skip_assignment;
   2946 
   2947   if (instr->hydrogen()->RequiresHoleCheck()) {
   2948     __ lw(scratch, target);
   2949     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2950 
   2951     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2952       DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
   2953     } else {
   2954       __ Branch(&skip_assignment, ne, scratch, Operand(at));
   2955     }
   2956   }
   2957 
   2958   __ sw(value, target);
   2959   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2960     SmiCheck check_needed =
   2961         instr->hydrogen()->value()->type().IsHeapObject()
   2962             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2963     __ RecordWriteContextSlot(context,
   2964                               target.offset(),
   2965                               value,
   2966                               scratch0(),
   2967                               GetRAState(),
   2968                               kSaveFPRegs,
   2969                               EMIT_REMEMBERED_SET,
   2970                               check_needed);
   2971   }
   2972 
   2973   __ bind(&skip_assignment);
   2974 }
   2975 
   2976 
   2977 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2978   HObjectAccess access = instr->hydrogen()->access();
   2979   int offset = access.offset();
   2980   Register object = ToRegister(instr->object());
   2981 
   2982   if (access.IsExternalMemory()) {
   2983     Register result = ToRegister(instr->result());
   2984     MemOperand operand = MemOperand(object, offset);
   2985     __ Load(result, operand, access.representation());
   2986     return;
   2987   }
   2988 
   2989   if (instr->hydrogen()->representation().IsDouble()) {
   2990     DoubleRegister result = ToDoubleRegister(instr->result());
   2991     __ ldc1(result, FieldMemOperand(object, offset));
   2992     return;
   2993   }
   2994 
   2995   Register result = ToRegister(instr->result());
   2996   if (!access.IsInobject()) {
   2997     __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   2998     object = result;
   2999   }
   3000   MemOperand operand = FieldMemOperand(object, offset);
   3001   __ Load(result, operand, access.representation());
   3002 }
   3003 
   3004 
   3005 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   3006   ASSERT(ToRegister(instr->context()).is(cp));
   3007   ASSERT(ToRegister(instr->object()).is(a0));
   3008   ASSERT(ToRegister(instr->result()).is(v0));
   3009 
   3010   // Name is always in a2.
   3011   __ li(a2, Operand(instr->name()));
   3012   Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
   3013   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3014 }
   3015 
   3016 
   3017 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   3018   Register scratch = scratch0();
   3019   Register function = ToRegister(instr->function());
   3020   Register result = ToRegister(instr->result());
   3021 
   3022   // Check that the function really is a function. Load map into the
   3023   // result register.
   3024   __ GetObjectType(function, result, scratch);
   3025   DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
   3026 
   3027   // Make sure that the function has an instance prototype.
   3028   Label non_instance;
   3029   __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
   3030   __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
   3031   __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
   3032 
   3033   // Get the prototype or initial map from the function.
   3034   __ lw(result,
   3035          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   3036 
   3037   // Check that the function has a prototype or an initial map.
   3038   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   3039   DeoptimizeIf(eq, instr->environment(), result, Operand(at));
   3040 
   3041   // If the function does not have an initial map, we're done.
   3042   Label done;
   3043   __ GetObjectType(result, scratch, scratch);
   3044   __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
   3045 
   3046   // Get the prototype from the initial map.
   3047   __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
   3048   __ Branch(&done);
   3049 
   3050   // Non-instance prototype: Fetch prototype from constructor field
   3051   // in initial map.
   3052   __ bind(&non_instance);
   3053   __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
   3054 
   3055   // All done.
   3056   __ bind(&done);
   3057 }
   3058 
   3059 
   3060 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   3061   Register result = ToRegister(instr->result());
   3062   __ LoadRoot(result, instr->index());
   3063 }
   3064 
   3065 
   3066 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   3067   Register arguments = ToRegister(instr->arguments());
   3068   Register result = ToRegister(instr->result());
   3069   // There are two words between the frame pointer and the last argument.
    3070   // Subtracting the index from the length accounts for one of them;
           // adding one more accounts for the other.
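           // Worked example: with const_length == 3 and const_index == 0, the
           // constant/constant case below loads from
           // MemOperand(arguments, (3 - 0 + 1) * kPointerSize), i.e. the word
           // four slots above the arguments pointer.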
   3071   if (instr->length()->IsConstantOperand()) {
   3072     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   3073     if (instr->index()->IsConstantOperand()) {
   3074       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3075       int index = (const_length - const_index) + 1;
   3076       __ lw(result, MemOperand(arguments, index * kPointerSize));
   3077     } else {
   3078       Register index = ToRegister(instr->index());
   3079       __ li(at, Operand(const_length + 1));
   3080       __ Subu(result, at, index);
   3081       __ sll(at, result, kPointerSizeLog2);
   3082       __ Addu(at, arguments, at);
   3083       __ lw(result, MemOperand(at));
   3084     }
   3085   } else if (instr->index()->IsConstantOperand()) {
   3086     Register length = ToRegister(instr->length());
   3087     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3088     int loc = const_index - 1;
   3089     if (loc != 0) {
   3090       __ Subu(result, length, Operand(loc));
   3091       __ sll(at, result, kPointerSizeLog2);
   3092       __ Addu(at, arguments, at);
   3093       __ lw(result, MemOperand(at));
   3094     } else {
   3095       __ sll(at, length, kPointerSizeLog2);
   3096       __ Addu(at, arguments, at);
   3097       __ lw(result, MemOperand(at));
   3098     }
   3099   } else {
   3100     Register length = ToRegister(instr->length());
   3101     Register index = ToRegister(instr->index());
   3102     __ Subu(result, length, index);
   3103     __ Addu(result, result, 1);
   3104     __ sll(at, result, kPointerSizeLog2);
   3105     __ Addu(at, arguments, at);
   3106     __ lw(result, MemOperand(at));
   3107   }
   3108 }
   3109 
   3110 
   3111 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   3112   Register external_pointer = ToRegister(instr->elements());
   3113   Register key = no_reg;
   3114   ElementsKind elements_kind = instr->elements_kind();
   3115   bool key_is_constant = instr->key()->IsConstantOperand();
   3116   int constant_key = 0;
   3117   if (key_is_constant) {
   3118     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3119     if (constant_key & 0xF0000000) {
   3120       Abort(kArrayIndexConstantValueTooBig);
   3121     }
   3122   } else {
   3123     key = ToRegister(instr->key());
   3124   }
   3125   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3126   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3127       ? (element_size_shift - kSmiTagSize) : element_size_shift;
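           // A smi key is already shifted left by kSmiTagSize, so the scaling
           // shift is reduced accordingly: e.g. for FLOAT64 elements
           // element_size_shift is 3, but a smi key only needs an sll by 2.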
   3128   int base_offset = instr->base_offset();
   3129 
   3130   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   3131       elements_kind == FLOAT32_ELEMENTS ||
   3132       elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
   3133       elements_kind == FLOAT64_ELEMENTS) {
   3135     FPURegister result = ToDoubleRegister(instr->result());
   3136     if (key_is_constant) {
   3137       __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
   3138     } else {
   3139       __ sll(scratch0(), key, shift_size);
   3140       __ Addu(scratch0(), scratch0(), external_pointer);
   3141     }
   3142     if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   3143         elements_kind == FLOAT32_ELEMENTS) {
   3144       __ lwc1(result, MemOperand(scratch0(), base_offset));
   3145       __ cvt_d_s(result, result);
    3146     } else {  // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
   3147       __ ldc1(result, MemOperand(scratch0(), base_offset));
   3148     }
   3149   } else {
   3150     Register result = ToRegister(instr->result());
   3151     MemOperand mem_operand = PrepareKeyedOperand(
   3152         key, external_pointer, key_is_constant, constant_key,
   3153         element_size_shift, shift_size, base_offset);
   3154     switch (elements_kind) {
   3155       case EXTERNAL_INT8_ELEMENTS:
   3156       case INT8_ELEMENTS:
   3157         __ lb(result, mem_operand);
   3158         break;
   3159       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
   3160       case EXTERNAL_UINT8_ELEMENTS:
   3161       case UINT8_ELEMENTS:
   3162       case UINT8_CLAMPED_ELEMENTS:
   3163         __ lbu(result, mem_operand);
   3164         break;
   3165       case EXTERNAL_INT16_ELEMENTS:
   3166       case INT16_ELEMENTS:
   3167         __ lh(result, mem_operand);
   3168         break;
   3169       case EXTERNAL_UINT16_ELEMENTS:
   3170       case UINT16_ELEMENTS:
   3171         __ lhu(result, mem_operand);
   3172         break;
   3173       case EXTERNAL_INT32_ELEMENTS:
   3174       case INT32_ELEMENTS:
   3175         __ lw(result, mem_operand);
   3176         break;
   3177       case EXTERNAL_UINT32_ELEMENTS:
   3178       case UINT32_ELEMENTS:
   3179         __ lw(result, mem_operand);
   3180         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3181           DeoptimizeIf(Ugreater_equal, instr->environment(),
   3182               result, Operand(0x80000000));
   3183         }
   3184         break;
   3185       case FLOAT32_ELEMENTS:
   3186       case FLOAT64_ELEMENTS:
   3187       case EXTERNAL_FLOAT32_ELEMENTS:
   3188       case EXTERNAL_FLOAT64_ELEMENTS:
   3189       case FAST_DOUBLE_ELEMENTS:
   3190       case FAST_ELEMENTS:
   3191       case FAST_SMI_ELEMENTS:
   3192       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3193       case FAST_HOLEY_ELEMENTS:
   3194       case FAST_HOLEY_SMI_ELEMENTS:
   3195       case DICTIONARY_ELEMENTS:
   3196       case SLOPPY_ARGUMENTS_ELEMENTS:
   3197         UNREACHABLE();
   3198         break;
   3199     }
   3200   }
   3201 }
   3202 
   3203 
   3204 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   3205   Register elements = ToRegister(instr->elements());
   3206   bool key_is_constant = instr->key()->IsConstantOperand();
   3207   Register key = no_reg;
   3208   DoubleRegister result = ToDoubleRegister(instr->result());
   3209   Register scratch = scratch0();
   3210 
   3211   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   3212 
   3213   int base_offset = instr->base_offset();
   3214   if (key_is_constant) {
   3215     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3216     if (constant_key & 0xF0000000) {
   3217       Abort(kArrayIndexConstantValueTooBig);
   3218     }
   3219     base_offset += constant_key * kDoubleSize;
   3220   }
   3221   __ Addu(scratch, elements, Operand(base_offset));
   3222 
   3223   if (!key_is_constant) {
   3224     key = ToRegister(instr->key());
   3225     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3226         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3227     __ sll(at, key, shift_size);
   3228     __ Addu(scratch, scratch, at);
   3229   }
   3230 
   3231   __ ldc1(result, MemOperand(scratch));
   3232 
   3233   if (instr->hydrogen()->RequiresHoleCheck()) {
   3234     __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
   3235     DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
   3236   }
   3237 }
   3238 
   3239 
   3240 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   3241   Register elements = ToRegister(instr->elements());
   3242   Register result = ToRegister(instr->result());
   3243   Register scratch = scratch0();
   3244   Register store_base = scratch;
   3245   int offset = instr->base_offset();
   3246 
   3247   if (instr->key()->IsConstantOperand()) {
   3248     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3249     offset += ToInteger32(const_operand) * kPointerSize;
   3250     store_base = elements;
   3251   } else {
   3252     Register key = ToRegister(instr->key());
   3253     // Even though the HLoadKeyed instruction forces the input
   3254     // representation for the key to be an integer, the input gets replaced
   3255     // during bound check elimination with the index argument to the bounds
   3256     // check, which can be tagged, so that case must be handled here, too.
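             // E.g. a smi key holds (index << kSmiTagSize), so scaling it by
             // kPointerSizeLog2 - kSmiTagSize yields the same byte offset as
             // scaling the untagged index by kPointerSizeLog2.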
   3257     if (instr->hydrogen()->key()->representation().IsSmi()) {
   3258       __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
   3259       __ addu(scratch, elements, scratch);
   3260     } else {
   3261       __ sll(scratch, key, kPointerSizeLog2);
   3262       __ addu(scratch, elements, scratch);
   3263     }
   3264   }
   3265   __ lw(result, MemOperand(store_base, offset));
   3266 
   3267   // Check for the hole value.
   3268   if (instr->hydrogen()->RequiresHoleCheck()) {
   3269     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3270       __ SmiTst(result, scratch);
   3271       DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   3272     } else {
   3273       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3274       DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
   3275     }
   3276   }
   3277 }
   3278 
   3279 
   3280 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3281   if (instr->is_typed_elements()) {
   3282     DoLoadKeyedExternalArray(instr);
   3283   } else if (instr->hydrogen()->representation().IsDouble()) {
   3284     DoLoadKeyedFixedDoubleArray(instr);
   3285   } else {
   3286     DoLoadKeyedFixedArray(instr);
   3287   }
   3288 }
   3289 
   3290 
   3291 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   3292                                          Register base,
   3293                                          bool key_is_constant,
   3294                                          int constant_key,
   3295                                          int element_size,
   3296                                          int shift_size,
   3297                                          int base_offset) {
   3298   if (key_is_constant) {
   3299     return MemOperand(base, (constant_key << element_size) + base_offset);
   3300   }
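           // E.g. with constant_key == 2 and element_size == 2 (4-byte elements)
           // this is MemOperand(base, 8 + base_offset) and needs no scratch
           // register arithmetic at all.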
   3301 
   3302   if (base_offset == 0) {
   3303     if (shift_size >= 0) {
   3304       __ sll(scratch0(), key, shift_size);
   3305       __ Addu(scratch0(), base, scratch0());
   3306       return MemOperand(scratch0());
   3307     } else {
   3308       ASSERT_EQ(-1, shift_size);
   3309       __ srl(scratch0(), key, 1);
   3310       __ Addu(scratch0(), base, scratch0());
   3311       return MemOperand(scratch0());
   3312     }
   3313   }
   3314 
   3315   if (shift_size >= 0) {
   3316     __ sll(scratch0(), key, shift_size);
   3317     __ Addu(scratch0(), base, scratch0());
   3318     return MemOperand(scratch0(), base_offset);
   3319   } else {
   3320     ASSERT_EQ(-1, shift_size);
   3321     __ sra(scratch0(), key, 1);
   3322     __ Addu(scratch0(), base, scratch0());
   3323     return MemOperand(scratch0(), base_offset);
   3324   }
   3325 }
   3326 
   3327 
   3328 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3329   ASSERT(ToRegister(instr->context()).is(cp));
   3330   ASSERT(ToRegister(instr->object()).is(a1));
   3331   ASSERT(ToRegister(instr->key()).is(a0));
   3332 
   3333   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
   3334   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3335 }
   3336 
   3337 
   3338 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3339   Register scratch = scratch0();
   3340   Register temp = scratch1();
   3341   Register result = ToRegister(instr->result());
   3342 
   3343   if (instr->hydrogen()->from_inlined()) {
   3344     __ Subu(result, sp, 2 * kPointerSize);
   3345   } else {
   3346     // Check if the calling frame is an arguments adaptor frame.
   3347     Label done, adapted;
   3348     __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3349     __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
   3350     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3351 
   3352     // Result is the frame pointer for the frame if not adapted and for the real
   3353     // frame below the adaptor frame if adapted.
   3354     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
   3355     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
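             // Movn/Movz form a branchless select: temp is zero exactly when the
             // caller frame is an arguments adaptor frame, so exactly one of the
             // two conditional moves writes result.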
   3356   }
   3357 }
   3358 
   3359 
   3360 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3361   Register elem = ToRegister(instr->elements());
   3362   Register result = ToRegister(instr->result());
   3363 
   3364   Label done;
   3365 
    3366   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3367   __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
   3368   __ Branch(&done, eq, fp, Operand(elem));
   3369 
   3370   // Arguments adaptor frame present. Get argument length from there.
   3371   __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3372   __ lw(result,
   3373         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3374   __ SmiUntag(result);
   3375 
   3376   // Argument length is in result register.
   3377   __ bind(&done);
   3378 }
   3379 
   3380 
   3381 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3382   Register receiver = ToRegister(instr->receiver());
   3383   Register function = ToRegister(instr->function());
   3384   Register result = ToRegister(instr->result());
   3385   Register scratch = scratch0();
   3386 
   3387   // If the receiver is null or undefined, we have to pass the global
   3388   // object as a receiver to normal functions. Values have to be
   3389   // passed unchanged to builtins and strict-mode functions.
   3390   Label global_object, result_in_receiver;
   3391 
   3392   if (!instr->hydrogen()->known_function()) {
   3393     // Do not transform the receiver to object for strict mode
   3394     // functions.
   3395     __ lw(scratch,
   3396            FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3397     __ lw(scratch,
   3398            FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3399 
   3400     // Do not transform the receiver to object for builtins.
   3401     int32_t strict_mode_function_mask =
   3402         1 <<  (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
   3403     int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
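             // The compiler hints field is stored as a smi, so each bit index is
             // offset by kSmiTagSize when tested against the raw word.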
   3404     __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
   3405     __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
   3406   }
   3407 
   3408   // Normal function. Replace undefined or null with global receiver.
   3409   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3410   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3411   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3412   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3413 
   3414   // Deoptimize if the receiver is not a JS object.
   3415   __ SmiTst(receiver, scratch);
   3416   DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
   3417 
   3418   __ GetObjectType(receiver, scratch, scratch);
   3419   DeoptimizeIf(lt, instr->environment(),
   3420                scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
   3421 
   3422   __ Branch(&result_in_receiver);
   3423   __ bind(&global_object);
   3424   __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3425   __ lw(result,
   3426         ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
   3427   __ lw(result,
   3428         FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
   3429 
   3430   if (result.is(receiver)) {
   3431     __ bind(&result_in_receiver);
   3432   } else {
   3433     Label result_ok;
   3434     __ Branch(&result_ok);
   3435     __ bind(&result_in_receiver);
   3436     __ mov(result, receiver);
   3437     __ bind(&result_ok);
   3438   }
   3439 }
   3440 
   3441 
   3442 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3443   Register receiver = ToRegister(instr->receiver());
   3444   Register function = ToRegister(instr->function());
   3445   Register length = ToRegister(instr->length());
   3446   Register elements = ToRegister(instr->elements());
   3447   Register scratch = scratch0();
   3448   ASSERT(receiver.is(a0));  // Used for parameter count.
   3449   ASSERT(function.is(a1));  // Required by InvokeFunction.
   3450   ASSERT(ToRegister(instr->result()).is(v0));
   3451 
   3452   // Copy the arguments to this function possibly from the
   3453   // adaptor frame below it.
   3454   const uint32_t kArgumentsLimit = 1 * KB;
   3455   DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
   3456 
   3457   // Push the receiver and use the register to keep the original
   3458   // number of arguments.
   3459   __ push(receiver);
   3460   __ Move(receiver, length);
    3461   // The arguments start at an offset of one pointer size from elements.
   3462   __ Addu(elements, elements, Operand(1 * kPointerSize));
   3463 
   3464   // Loop through the arguments pushing them onto the execution
   3465   // stack.
   3466   Label invoke, loop;
   3467   // length is a small non-negative integer, due to the test above.
   3468   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
   3469   __ sll(scratch, length, 2);
   3470   __ bind(&loop);
   3471   __ Addu(scratch, elements, scratch);
   3472   __ lw(scratch, MemOperand(scratch));
   3473   __ push(scratch);
   3474   __ Subu(length, length, Operand(1));
   3475   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
   3476   __ sll(scratch, length, 2);
   3477 
   3478   __ bind(&invoke);
   3479   ASSERT(instr->HasPointerMap());
   3480   LPointerMap* pointers = instr->pointer_map();
   3481   SafepointGenerator safepoint_generator(
   3482       this, pointers, Safepoint::kLazyDeopt);
   3483   // The number of arguments is stored in receiver which is a0, as expected
   3484   // by InvokeFunction.
   3485   ParameterCount actual(receiver);
   3486   __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
   3487 }
   3488 
   3489 
   3490 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3491   LOperand* argument = instr->value();
   3492   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3493     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3494   } else {
   3495     Register argument_reg = EmitLoadRegister(argument, at);
   3496     __ push(argument_reg);
   3497   }
   3498 }
   3499 
   3500 
   3501 void LCodeGen::DoDrop(LDrop* instr) {
   3502   __ Drop(instr->count());
   3503 }
   3504 
   3505 
   3506 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3507   Register result = ToRegister(instr->result());
   3508   __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3509 }
   3510 
   3511 
   3512 void LCodeGen::DoContext(LContext* instr) {
   3513   // If there is a non-return use, the context must be moved to a register.
   3514   Register result = ToRegister(instr->result());
   3515   if (info()->IsOptimizing()) {
   3516     __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3517   } else {
   3518     // If there is no frame, the context must be in cp.
   3519     ASSERT(result.is(cp));
   3520   }
   3521 }
   3522 
   3523 
   3524 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3525   ASSERT(ToRegister(instr->context()).is(cp));
   3526   __ li(scratch0(), instr->hydrogen()->pairs());
   3527   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   3528   // The context is the first argument.
   3529   __ Push(cp, scratch0(), scratch1());
   3530   CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
   3531 }
   3532 
   3533 
   3534 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3535                                  int formal_parameter_count,
   3536                                  int arity,
   3537                                  LInstruction* instr,
   3538                                  A1State a1_state) {
   3539   bool dont_adapt_arguments =
   3540       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3541   bool can_invoke_directly =
   3542       dont_adapt_arguments || formal_parameter_count == arity;
   3543 
   3544   LPointerMap* pointers = instr->pointer_map();
   3545 
   3546   if (can_invoke_directly) {
   3547     if (a1_state == A1_UNINITIALIZED) {
   3548       __ li(a1, function);
   3549     }
   3550 
   3551     // Change context.
   3552     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   3553 
    3554     // Set a0 to the argument count if adaptation is not needed. Assumes
    3555     // that a0 is available to write to at this point.
   3556     if (dont_adapt_arguments) {
   3557       __ li(a0, Operand(arity));
   3558     }
   3559 
   3560     // Invoke function.
   3561     __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
   3562     __ Call(at);
   3563 
   3564     // Set up deoptimization.
   3565     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3566   } else {
   3567     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3568     ParameterCount count(arity);
   3569     ParameterCount expected(formal_parameter_count);
   3570     __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
   3571   }
   3572 }
   3573 
   3574 
   3575 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3576   ASSERT(instr->context() != NULL);
   3577   ASSERT(ToRegister(instr->context()).is(cp));
   3578   Register input = ToRegister(instr->value());
   3579   Register result = ToRegister(instr->result());
   3580   Register scratch = scratch0();
   3581 
   3582   // Deoptimize if not a heap number.
   3583   __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3584   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3585   DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
   3586 
   3587   Label done;
   3588   Register exponent = scratch0();
   3589   scratch = no_reg;
   3590   __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3591   // Check the sign of the argument. If the argument is positive, just
   3592   // return it.
   3593   __ Move(result, input);
   3594   __ And(at, exponent, Operand(HeapNumber::kSignMask));
   3595   __ Branch(&done, eq, at, Operand(zero_reg));
   3596 
   3597   // Input is negative. Reverse its sign.
   3598   // Preserve the value of all registers.
   3599   {
   3600     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   3601 
   3602     // Registers were saved at the safepoint, so we can use
   3603     // many scratch registers.
   3604     Register tmp1 = input.is(a1) ? a0 : a1;
   3605     Register tmp2 = input.is(a2) ? a0 : a2;
   3606     Register tmp3 = input.is(a3) ? a0 : a3;
   3607     Register tmp4 = input.is(t0) ? a0 : t0;
   3608 
   3609     // exponent: floating point exponent value.
   3610 
   3611     Label allocated, slow;
   3612     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3613     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3614     __ Branch(&allocated);
   3615 
   3616     // Slow case: Call the runtime system to do the number allocation.
   3617     __ bind(&slow);
   3618 
   3619     CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
   3620                             instr->context());
    3621     // Set the pointer to the new heap number in tmp1.
   3622     if (!tmp1.is(v0))
   3623       __ mov(tmp1, v0);
   3624     // Restore input_reg after call to runtime.
   3625     __ LoadFromSafepointRegisterSlot(input, input);
   3626     __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3627 
   3628     __ bind(&allocated);
   3629     // exponent: floating point exponent value.
   3630     // tmp1: allocated heap number.
   3631     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
   3632     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3633     __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3634     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3635 
   3636     __ StoreToSafepointRegisterSlot(tmp1, result);
   3637   }
   3638 
   3639   __ bind(&done);
   3640 }
   3641 
   3642 
   3643 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3644   Register input = ToRegister(instr->value());
   3645   Register result = ToRegister(instr->result());
   3646   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   3647   Label done;
   3648   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
   3649   __ mov(result, input);
   3650   __ subu(result, zero_reg, input);
   3651   // Overflow if result is still negative, i.e. 0x80000000.
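           // Example: input == kMinInt (0x80000000) has no positive counterpart
           // in 32 bits; subu yields 0x80000000 again, which the check below
           // catches.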
   3652   DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
   3653   __ bind(&done);
   3654 }
   3655 
   3656 
   3657 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3658   // Class for deferred case.
   3659   class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   3660    public:
   3661     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3662         : LDeferredCode(codegen), instr_(instr) { }
   3663     virtual void Generate() V8_OVERRIDE {
   3664       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3665     }
   3666     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   3667    private:
   3668     LMathAbs* instr_;
   3669   };
   3670 
   3671   Representation r = instr->hydrogen()->value()->representation();
   3672   if (r.IsDouble()) {
   3673     FPURegister input = ToDoubleRegister(instr->value());
   3674     FPURegister result = ToDoubleRegister(instr->result());
   3675     __ abs_d(result, input);
   3676   } else if (r.IsSmiOrInteger32()) {
   3677     EmitIntegerMathAbs(instr);
   3678   } else {
   3679     // Representation is tagged.
   3680     DeferredMathAbsTaggedHeapNumber* deferred =
   3681         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3682     Register input = ToRegister(instr->value());
   3683     // Smi check.
   3684     __ JumpIfNotSmi(input, deferred->entry());
   3685     // If smi, handle it directly.
   3686     EmitIntegerMathAbs(instr);
   3687     __ bind(deferred->exit());
   3688   }
   3689 }
   3690 
   3691 
   3692 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3693   DoubleRegister input = ToDoubleRegister(instr->value());
   3694   Register result = ToRegister(instr->result());
   3695   Register scratch1 = scratch0();
   3696   Register except_flag = ToRegister(instr->temp());
   3697 
   3698   __ EmitFPUTruncate(kRoundToMinusInf,
   3699                      result,
   3700                      input,
   3701                      scratch1,
   3702                      double_scratch0(),
   3703                      except_flag);
   3704 
   3705   // Deopt if the operation did not succeed.
   3706   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   3707 
   3708   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3709     // Test for -0.
   3710     Label done;
   3711     __ Branch(&done, ne, result, Operand(zero_reg));
   3712     __ mfc1(scratch1, input.high());
   3713     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   3714     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   3715     __ bind(&done);
   3716   }
   3717 }
   3718 
   3719 
   3720 void LCodeGen::DoMathRound(LMathRound* instr) {
   3721   DoubleRegister input = ToDoubleRegister(instr->value());
   3722   Register result = ToRegister(instr->result());
   3723   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3724   Register scratch = scratch0();
   3725   Label done, check_sign_on_zero;
   3726 
   3727   // Extract exponent bits.
   3728   __ mfc1(result, input.high());
   3729   __ Ext(scratch,
   3730          result,
   3731          HeapNumber::kExponentShift,
   3732          HeapNumber::kExponentBits);
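           // The high word of an IEEE 754 double holds the sign bit and the 11
           // exponent bits; Ext extracts the biased exponent, e.g. 1.0 has a
           // biased exponent of HeapNumber::kExponentBias.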
   3733 
   3734   // If the number is in ]-0.5, +0.5[, the result is +/- 0.
   3735   Label skip1;
   3736   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
   3737   __ mov(result, zero_reg);
   3738   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3739     __ Branch(&check_sign_on_zero);
   3740   } else {
   3741     __ Branch(&done);
   3742   }
   3743   __ bind(&skip1);
   3744 
   3745   // The following conversion will not work with numbers
   3746   // outside of ]-2^32, 2^32[.
   3747   DeoptimizeIf(ge, instr->environment(), scratch,
   3748                Operand(HeapNumber::kExponentBias + 32));
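           // A biased exponent >= kExponentBias + 32 means |input| >= 2^32,
           // which cannot be represented in the 32-bit result, hence the eager
           // deopt.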
   3749 
   3750   // Save the original sign for later comparison.
   3751   __ And(scratch, result, Operand(HeapNumber::kSignMask));
   3752 
   3753   __ Move(double_scratch0(), 0.5);
   3754   __ add_d(double_scratch0(), input, double_scratch0());
   3755 
   3756   // Check sign of the result: if the sign changed, the input
    3757   // value was in [-0.5, 0[ and the result should be -0.
   3758   __ mfc1(result, double_scratch0().high());
   3759   __ Xor(result, result, Operand(scratch));
   3760   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3761     // ARM uses 'mi' here, which is 'lt'
   3762     DeoptimizeIf(lt, instr->environment(), result,
   3763                  Operand(zero_reg));
   3764   } else {
   3765     Label skip2;
   3766     // ARM uses 'mi' here, which is 'lt'
   3767     // Negating it results in 'ge'
   3768     __ Branch(&skip2, ge, result, Operand(zero_reg));
   3769     __ mov(result, zero_reg);
   3770     __ Branch(&done);
   3771     __ bind(&skip2);
   3772   }
   3773 
   3774   Register except_flag = scratch;
   3775   __ EmitFPUTruncate(kRoundToMinusInf,
   3776                      result,
   3777                      double_scratch0(),
   3778                      at,
   3779                      double_scratch1,
   3780                      except_flag);
   3781 
   3782   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   3783 
   3784   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3785     // Test for -0.
   3786     __ Branch(&done, ne, result, Operand(zero_reg));
   3787     __ bind(&check_sign_on_zero);
   3788     __ mfc1(scratch, input.high());
   3789     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
   3790     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   3791   }
   3792   __ bind(&done);
   3793 }
   3794 
   3795 
   3796 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3797   DoubleRegister input = ToDoubleRegister(instr->value());
   3798   DoubleRegister result = ToDoubleRegister(instr->result());
   3799   __ sqrt_d(result, input);
   3800 }
   3801 
   3802 
   3803 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3804   DoubleRegister input = ToDoubleRegister(instr->value());
   3805   DoubleRegister result = ToDoubleRegister(instr->result());
   3806   DoubleRegister temp = ToDoubleRegister(instr->temp());
   3807 
   3808   ASSERT(!input.is(result));
   3809 
   3810   // Note that according to ECMA-262 15.8.2.13:
   3811   // Math.pow(-Infinity, 0.5) == Infinity
   3812   // Math.sqrt(-Infinity) == NaN
   3813   Label done;
   3814   __ Move(temp, -V8_INFINITY);
   3815   __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
   3816   // Set up Infinity in the delay slot.
   3817   // result is overwritten if the branch is not taken.
   3818   __ neg_d(result, temp);
   3819 
   3820   // Add +0 to convert -0 to +0.
   3821   __ add_d(result, input, kDoubleRegZero);
   3822   __ sqrt_d(result, result);
   3823   __ bind(&done);
   3824 }
   3825 
   3826 
   3827 void LCodeGen::DoPower(LPower* instr) {
   3828   Representation exponent_type = instr->hydrogen()->right()->representation();
   3829   // Having marked this as a call, we can use any registers.
   3830   // Just make sure that the input/output registers are the expected ones.
   3831   ASSERT(!instr->right()->IsDoubleRegister() ||
   3832          ToDoubleRegister(instr->right()).is(f4));
   3833   ASSERT(!instr->right()->IsRegister() ||
   3834          ToRegister(instr->right()).is(a2));
   3835   ASSERT(ToDoubleRegister(instr->left()).is(f2));
   3836   ASSERT(ToDoubleRegister(instr->result()).is(f0));
   3837 
   3838   if (exponent_type.IsSmi()) {
   3839     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3840     __ CallStub(&stub);
   3841   } else if (exponent_type.IsTagged()) {
   3842     Label no_deopt;
   3843     __ JumpIfSmi(a2, &no_deopt);
   3844     __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
   3845     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3846     DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
   3847     __ bind(&no_deopt);
   3848     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3849     __ CallStub(&stub);
   3850   } else if (exponent_type.IsInteger32()) {
   3851     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3852     __ CallStub(&stub);
   3853   } else {
   3854     ASSERT(exponent_type.IsDouble());
   3855     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3856     __ CallStub(&stub);
   3857   }
   3858 }
   3859 
   3860 
   3861 void LCodeGen::DoMathExp(LMathExp* instr) {
   3862   DoubleRegister input = ToDoubleRegister(instr->value());
   3863   DoubleRegister result = ToDoubleRegister(instr->result());
   3864   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
   3865   DoubleRegister double_scratch2 = double_scratch0();
   3866   Register temp1 = ToRegister(instr->temp1());
   3867   Register temp2 = ToRegister(instr->temp2());
   3868 
   3869   MathExpGenerator::EmitMathExp(
   3870       masm(), input, result, double_scratch1, double_scratch2,
   3871       temp1, temp2, scratch0());
   3872 }
   3873 
   3874 
   3875 void LCodeGen::DoMathLog(LMathLog* instr) {
   3876   __ PrepareCallCFunction(0, 1, scratch0());
   3877   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3878   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
   3879                    0, 1);
   3880   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3881 }
   3882 
   3883 
   3884 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3885   Register input = ToRegister(instr->value());
   3886   Register result = ToRegister(instr->result());
   3887   __ Clz(result, input);
   3888 }
   3889 
   3890 
   3891 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3892   ASSERT(ToRegister(instr->context()).is(cp));
   3893   ASSERT(ToRegister(instr->function()).is(a1));
   3894   ASSERT(instr->HasPointerMap());
   3895 
   3896   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3897   if (known_function.is_null()) {
   3898     LPointerMap* pointers = instr->pointer_map();
   3899     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3900     ParameterCount count(instr->arity());
   3901     __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
   3902   } else {
   3903     CallKnownFunction(known_function,
   3904                       instr->hydrogen()->formal_parameter_count(),
   3905                       instr->arity(),
   3906                       instr,
   3907                       A1_CONTAINS_TARGET);
   3908   }
   3909 }
   3910 
   3911 
   3912 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3913   ASSERT(ToRegister(instr->result()).is(v0));
   3914 
   3915   LPointerMap* pointers = instr->pointer_map();
   3916   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3917 
   3918   if (instr->target()->IsConstantOperand()) {
   3919     LConstantOperand* target = LConstantOperand::cast(instr->target());
   3920     Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3921     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3922     __ Call(code, RelocInfo::CODE_TARGET);
   3923   } else {
   3924     ASSERT(instr->target()->IsRegister());
   3925     Register target = ToRegister(instr->target());
   3926     generator.BeforeCall(__ CallSize(target));
   3927     __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3928     __ Call(target);
   3929   }
   3930   generator.AfterCall();
   3931 }
   3932 
   3933 
   3934 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   3935   ASSERT(ToRegister(instr->function()).is(a1));
   3936   ASSERT(ToRegister(instr->result()).is(v0));
   3937 
   3938   if (instr->hydrogen()->pass_argument_count()) {
   3939     __ li(a0, Operand(instr->arity()));
   3940   }
   3941 
   3942   // Change context.
   3943   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   3944 
   3945   // Load the code entry address
   3946   __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
   3947   __ Call(at);
   3948 
   3949   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3950 }
   3951 
   3952 
   3953 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3954   ASSERT(ToRegister(instr->context()).is(cp));
   3955   ASSERT(ToRegister(instr->function()).is(a1));
   3956   ASSERT(ToRegister(instr->result()).is(v0));
   3957 
   3958   int arity = instr->arity();
   3959   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
   3960   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3961 }
   3962 
   3963 
   3964 void LCodeGen::DoCallNew(LCallNew* instr) {
   3965   ASSERT(ToRegister(instr->context()).is(cp));
   3966   ASSERT(ToRegister(instr->constructor()).is(a1));
   3967   ASSERT(ToRegister(instr->result()).is(v0));
   3968 
   3969   __ li(a0, Operand(instr->arity()));
    3970   // No cell in a2 for construct type feedback in optimized code.
   3971   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
   3972   CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
   3973   CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   3974 }
   3975 
   3976 
   3977 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3978   ASSERT(ToRegister(instr->context()).is(cp));
   3979   ASSERT(ToRegister(instr->constructor()).is(a1));
   3980   ASSERT(ToRegister(instr->result()).is(v0));
   3981 
   3982   __ li(a0, Operand(instr->arity()));
   3983   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
   3984   ElementsKind kind = instr->hydrogen()->elements_kind();
   3985   AllocationSiteOverrideMode override_mode =
   3986       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3987           ? DISABLE_ALLOCATION_SITES
   3988           : DONT_OVERRIDE;
   3989 
   3990   if (instr->arity() == 0) {
   3991     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3992     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   3993   } else if (instr->arity() == 1) {
   3994     Label done;
   3995     if (IsFastPackedElementsKind(kind)) {
   3996       Label packed_case;
    3997       // We might need the holey variant of the elements kind;
    3998       // look at the first argument.
   3999       __ lw(t1, MemOperand(sp, 0));
   4000       __ Branch(&packed_case, eq, t1, Operand(zero_reg));
   4001 
   4002       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   4003       ArraySingleArgumentConstructorStub stub(isolate(),
   4004                                               holey_kind,
   4005                                               override_mode);
   4006       CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4007       __ jmp(&done);
   4008       __ bind(&packed_case);
   4009     }
   4010 
   4011     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   4012     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4013     __ bind(&done);
   4014   } else {
   4015     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
   4016     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4017   }
   4018 }
   4019 
   4020 
   4021 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   4022   CallRuntime(instr->function(), instr->arity(), instr);
   4023 }
   4024 
   4025 
   4026 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4027   Register function = ToRegister(instr->function());
   4028   Register code_object = ToRegister(instr->code_object());
   4029   __ Addu(code_object, code_object,
   4030           Operand(Code::kHeaderSize - kHeapObjectTag));
   4031   __ sw(code_object,
   4032         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   4033 }
   4034 
   4035 
   4036 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   4037   Register result = ToRegister(instr->result());
   4038   Register base = ToRegister(instr->base_object());
   4039   if (instr->offset()->IsConstantOperand()) {
   4040     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   4041     __ Addu(result, base, Operand(ToInteger32(offset)));
   4042   } else {
   4043     Register offset = ToRegister(instr->offset());
   4044     __ Addu(result, base, offset);
   4045   }
   4046 }
   4047 
   4048 
   4049 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4050   Representation representation = instr->representation();
   4051 
   4052   Register object = ToRegister(instr->object());
   4053   Register scratch = scratch0();
   4054   HObjectAccess access = instr->hydrogen()->access();
   4055   int offset = access.offset();
   4056 
   4057   if (access.IsExternalMemory()) {
   4058     Register value = ToRegister(instr->value());
   4059     MemOperand operand = MemOperand(object, offset);
   4060     __ Store(value, operand, representation);
   4061     return;
   4062   }
   4063 
   4064   __ AssertNotSmi(object);
   4065 
   4066   ASSERT(!representation.IsSmi() ||
   4067          !instr->value()->IsConstantOperand() ||
   4068          IsSmi(LConstantOperand::cast(instr->value())));
   4069   if (representation.IsDouble()) {
   4070     ASSERT(access.IsInobject());
   4071     ASSERT(!instr->hydrogen()->has_transition());
   4072     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4073     DoubleRegister value = ToDoubleRegister(instr->value());
   4074     __ sdc1(value, FieldMemOperand(object, offset));
   4075     return;
   4076   }
   4077 
   4078   if (instr->hydrogen()->has_transition()) {
   4079     Handle<Map> transition = instr->hydrogen()->transition_map();
   4080     AddDeprecationDependency(transition);
   4081     __ li(scratch, Operand(transition));
   4082     __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   4083     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   4084       Register temp = ToRegister(instr->temp());
   4085       // Update the write barrier for the map field.
   4086       __ RecordWriteForMap(object,
   4087                            scratch,
   4088                            temp,
   4089                            GetRAState(),
   4090                            kSaveFPRegs);
   4091     }
   4092   }
   4093 
   4094   // Do the store.
   4095   Register value = ToRegister(instr->value());
   4096   if (access.IsInobject()) {
   4097     MemOperand operand = FieldMemOperand(object, offset);
   4098     __ Store(value, operand, representation);
   4099     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4100       // Update the write barrier for the object for in-object properties.
   4101       __ RecordWriteField(object,
   4102                           offset,
   4103                           value,
   4104                           scratch,
   4105                           GetRAState(),
   4106                           kSaveFPRegs,
   4107                           EMIT_REMEMBERED_SET,
   4108                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   4109                           instr->hydrogen()->PointersToHereCheckForValue());
   4110     }
   4111   } else {
   4112     __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   4113     MemOperand operand = FieldMemOperand(scratch, offset);
   4114     __ Store(value, operand, representation);
   4115     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4116       // Update the write barrier for the properties array.
   4117       // object is used as a scratch register.
   4118       __ RecordWriteField(scratch,
   4119                           offset,
   4120                           value,
   4121                           object,
   4122                           GetRAState(),
   4123                           kSaveFPRegs,
   4124                           EMIT_REMEMBERED_SET,
   4125                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   4126                           instr->hydrogen()->PointersToHereCheckForValue());
   4127     }
   4128   }
   4129 }
   4130 
   4131 
   4132 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4133   ASSERT(ToRegister(instr->context()).is(cp));
   4134   ASSERT(ToRegister(instr->object()).is(a1));
   4135   ASSERT(ToRegister(instr->value()).is(a0));
   4136 
   4137   // Name is always in a2.
   4138   __ li(a2, Operand(instr->name()));
   4139   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   4140   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4141 }
   4142 
   4143 
   4144 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   4145   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   4146   Operand operand(0);
   4147   Register reg;
   4148   if (instr->index()->IsConstantOperand()) {
   4149     operand = ToOperand(instr->index());
   4150     reg = ToRegister(instr->length());
   4151     cc = CommuteCondition(cc);
   4152   } else {
   4153     reg = ToRegister(instr->index());
   4154     operand = ToOperand(instr->length());
   4155   }
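  // Worked example (illustrative): with allow_equality() false the check
  // deopts on "index hs length" (unsigned index >= length). When the index
  // is a constant, the operands are swapped, so the same check becomes
  // "length ls index" (unsigned length <= index) -- which is exactly
  // CommuteCondition(hs) == ls.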
   4156   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   4157     Label done;
   4158     __ Branch(&done, NegateCondition(cc), reg, operand);
   4159     __ stop("eliminated bounds check failed");
   4160     __ bind(&done);
   4161   } else {
   4162     DeoptimizeIf(cc, instr->environment(), reg, operand);
   4163   }
   4164 }
   4165 
   4166 
   4167 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4168   Register external_pointer = ToRegister(instr->elements());
   4169   Register key = no_reg;
   4170   ElementsKind elements_kind = instr->elements_kind();
   4171   bool key_is_constant = instr->key()->IsConstantOperand();
   4172   int constant_key = 0;
   4173   if (key_is_constant) {
   4174     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4175     if (constant_key & 0xF0000000) {
   4176       Abort(kArrayIndexConstantValueTooBig);
   4177     }
   4178   } else {
   4179     key = ToRegister(instr->key());
   4180   }
   4181   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   4182   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4183       ? (element_size_shift - kSmiTagSize) : element_size_shift;
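  // Example (illustrative): a smi key k is stored as (k << kSmiTagSize), so
  // for FLOAT64 elements (element_size_shift == 3) the scaled offset is
  //   (k << 1) << (3 - 1) == k << 3,
  // the same byte offset as shifting the untagged key by element_size_shift.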
   4184   int base_offset = instr->base_offset();
   4185 
   4186   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   4187       elements_kind == FLOAT32_ELEMENTS ||
   4188       elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
   4189       elements_kind == FLOAT64_ELEMENTS) {
   4190     Register address = scratch0();
   4191     FPURegister value(ToDoubleRegister(instr->value()));
   4192     if (key_is_constant) {
   4193       if (constant_key != 0) {
   4194         __ Addu(address, external_pointer,
   4195                 Operand(constant_key << element_size_shift));
   4196       } else {
   4197         address = external_pointer;
   4198       }
   4199     } else {
   4200       __ sll(address, key, shift_size);
   4201       __ Addu(address, external_pointer, address);
   4202     }
   4203 
   4204     if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   4205         elements_kind == FLOAT32_ELEMENTS) {
   4206       __ cvt_s_d(double_scratch0(), value);
   4207       __ swc1(double_scratch0(), MemOperand(address, base_offset));
   4208     } else {  // Storing doubles, not floats.
   4209       __ sdc1(value, MemOperand(address, base_offset));
   4210     }
   4211   } else {
   4212     Register value(ToRegister(instr->value()));
   4213     MemOperand mem_operand = PrepareKeyedOperand(
   4214         key, external_pointer, key_is_constant, constant_key,
   4215         element_size_shift, shift_size,
   4216         base_offset);
   4217     switch (elements_kind) {
   4218       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
   4219       case EXTERNAL_INT8_ELEMENTS:
   4220       case EXTERNAL_UINT8_ELEMENTS:
   4221       case UINT8_ELEMENTS:
   4222       case UINT8_CLAMPED_ELEMENTS:
   4223       case INT8_ELEMENTS:
   4224         __ sb(value, mem_operand);
   4225         break;
   4226       case EXTERNAL_INT16_ELEMENTS:
   4227       case EXTERNAL_UINT16_ELEMENTS:
   4228       case INT16_ELEMENTS:
   4229       case UINT16_ELEMENTS:
   4230         __ sh(value, mem_operand);
   4231         break;
   4232       case EXTERNAL_INT32_ELEMENTS:
   4233       case EXTERNAL_UINT32_ELEMENTS:
   4234       case INT32_ELEMENTS:
   4235       case UINT32_ELEMENTS:
   4236         __ sw(value, mem_operand);
   4237         break;
   4238       case FLOAT32_ELEMENTS:
   4239       case FLOAT64_ELEMENTS:
   4240       case EXTERNAL_FLOAT32_ELEMENTS:
   4241       case EXTERNAL_FLOAT64_ELEMENTS:
   4242       case FAST_DOUBLE_ELEMENTS:
   4243       case FAST_ELEMENTS:
   4244       case FAST_SMI_ELEMENTS:
   4245       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4246       case FAST_HOLEY_ELEMENTS:
   4247       case FAST_HOLEY_SMI_ELEMENTS:
   4248       case DICTIONARY_ELEMENTS:
   4249       case SLOPPY_ARGUMENTS_ELEMENTS:
   4250         UNREACHABLE();
   4251         break;
   4252     }
   4253   }
   4254 }
   4255 
   4256 
   4257 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4258   DoubleRegister value = ToDoubleRegister(instr->value());
   4259   Register elements = ToRegister(instr->elements());
   4260   Register scratch = scratch0();
   4261   DoubleRegister double_scratch = double_scratch0();
   4262   bool key_is_constant = instr->key()->IsConstantOperand();
   4263   int base_offset = instr->base_offset();
   4264   Label not_nan, done;
   4265 
   4266   // Calculate the effective address of the slot in the array to store the
   4267   // double value.
   4268   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4269   if (key_is_constant) {
   4270     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4271     if (constant_key & 0xF0000000) {
   4272       Abort(kArrayIndexConstantValueTooBig);
   4273     }
   4274     __ Addu(scratch, elements,
   4275            Operand((constant_key << element_size_shift) + base_offset));
   4276   } else {
   4277     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4278         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4279     __ Addu(scratch, elements, Operand(base_offset));
   4280     __ sll(at, ToRegister(instr->key()), shift_size);
   4281     __ Addu(scratch, scratch, at);
   4282   }
   4283 
   4284   if (instr->NeedsCanonicalization()) {
   4285     Label is_nan;
   4286     // Check for NaN. All NaNs must be canonicalized.
   4287     __ BranchF(NULL, &is_nan, eq, value, value);
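    // The self-comparison is the usual NaN test: "value == value" compares
    // unordered (taking the NaN target of BranchF) exactly when value is a
    // NaN. Canonicalizing matters because, to our understanding, holey
    // double arrays reserve one specific NaN bit pattern for the hole, so
    // arbitrary NaN payloads must not be stored verbatim.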
   4288     __ Branch(&not_nan);
   4289 
    // Only load the canonical NaN if the comparison above was unordered,
    // i.e. the value being stored is a NaN.
   4291     __ bind(&is_nan);
   4292     __ LoadRoot(at, Heap::kNanValueRootIndex);
   4293     __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
   4294     __ sdc1(double_scratch, MemOperand(scratch, 0));
   4295     __ Branch(&done);
   4296   }
   4297 
   4298   __ bind(&not_nan);
   4299   __ sdc1(value, MemOperand(scratch, 0));
   4300   __ bind(&done);
   4301 }
   4302 
   4303 
   4304 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4305   Register value = ToRegister(instr->value());
   4306   Register elements = ToRegister(instr->elements());
   4307   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
   4308       : no_reg;
   4309   Register scratch = scratch0();
   4310   Register store_base = scratch;
   4311   int offset = instr->base_offset();
   4312 
   4313   // Do the store.
   4314   if (instr->key()->IsConstantOperand()) {
   4315     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4316     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4317     offset += ToInteger32(const_operand) * kPointerSize;
   4318     store_base = elements;
   4319   } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
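    // Worked example (illustrative): with kPointerSizeLog2 == 2 and
    // kSmiTagSize == 1, a smi key of 5 is stored as 10, and
    //   10 << (kPointerSizeLog2 - kSmiTagSize) == 10 << 1 == 20 == 5 * 4,
    // the correct byte offset for element 5 of a pointer-sized array.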
   4324     if (instr->hydrogen()->key()->representation().IsSmi()) {
   4325       __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
   4326       __ addu(scratch, elements, scratch);
   4327     } else {
   4328       __ sll(scratch, key, kPointerSizeLog2);
   4329       __ addu(scratch, elements, scratch);
   4330     }
   4331   }
   4332   __ sw(value, MemOperand(store_base, offset));
   4333 
   4334   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4335     SmiCheck check_needed =
   4336         instr->hydrogen()->value()->type().IsHeapObject()
   4337             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4338     // Compute address of modified element and store it into key register.
   4339     __ Addu(key, store_base, Operand(offset));
   4340     __ RecordWrite(elements,
   4341                    key,
   4342                    value,
   4343                    GetRAState(),
   4344                    kSaveFPRegs,
   4345                    EMIT_REMEMBERED_SET,
   4346                    check_needed,
   4347                    instr->hydrogen()->PointersToHereCheckForValue());
   4348   }
   4349 }
   4350 
   4351 
   4352 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: typed/external elements, fast double elements, or
  // fast (tagged) fixed elements.
   4354   if (instr->is_typed_elements()) {
   4355     DoStoreKeyedExternalArray(instr);
   4356   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4357     DoStoreKeyedFixedDoubleArray(instr);
   4358   } else {
   4359     DoStoreKeyedFixedArray(instr);
   4360   }
   4361 }
   4362 
   4363 
   4364 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4365   ASSERT(ToRegister(instr->context()).is(cp));
   4366   ASSERT(ToRegister(instr->object()).is(a2));
   4367   ASSERT(ToRegister(instr->key()).is(a1));
   4368   ASSERT(ToRegister(instr->value()).is(a0));
   4369 
   4370   Handle<Code> ic = (instr->strict_mode() == STRICT)
   4371       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
   4372       : isolate()->builtins()->KeyedStoreIC_Initialize();
   4373   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4374 }
   4375 
   4376 
   4377 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4378   Register object_reg = ToRegister(instr->object());
   4379   Register scratch = scratch0();
   4380 
   4381   Handle<Map> from_map = instr->original_map();
   4382   Handle<Map> to_map = instr->transitioned_map();
   4383   ElementsKind from_kind = instr->from_kind();
   4384   ElementsKind to_kind = instr->to_kind();
   4385 
   4386   Label not_applicable;
   4387   __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4388   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
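  // A "simple" transition (e.g. FAST_SMI_ELEMENTS -> FAST_ELEMENTS) keeps
  // the backing store layout unchanged, so only the map word needs to be
  // rewritten below. Anything else (e.g. smi -> double) has to rewrite the
  // elements as well, which is delegated to TransitionElementsKindStub.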
   4389 
   4390   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4391     Register new_map_reg = ToRegister(instr->new_map_temp());
   4392     __ li(new_map_reg, Operand(to_map));
   4393     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4394     // Write barrier.
   4395     __ RecordWriteForMap(object_reg,
   4396                          new_map_reg,
   4397                          scratch,
   4398                          GetRAState(),
   4399                          kDontSaveFPRegs);
   4400   } else {
   4401     ASSERT(object_reg.is(a0));
   4402     ASSERT(ToRegister(instr->context()).is(cp));
   4403     PushSafepointRegistersScope scope(
   4404         this, Safepoint::kWithRegistersAndDoubles);
   4405     __ li(a1, Operand(to_map));
   4406     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
   4407     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
   4408     __ CallStub(&stub);
   4409     RecordSafepointWithRegistersAndDoubles(
   4410         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   4411   }
   4412   __ bind(&not_applicable);
   4413 }
   4414 
   4415 
   4416 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4417   Register object = ToRegister(instr->object());
   4418   Register temp = ToRegister(instr->temp());
   4419   Label no_memento_found;
   4420   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
   4421                                      ne, &no_memento_found);
   4422   DeoptimizeIf(al, instr->environment());
   4423   __ bind(&no_memento_found);
   4424 }
   4425 
   4426 
   4427 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4428   ASSERT(ToRegister(instr->context()).is(cp));
   4429   ASSERT(ToRegister(instr->left()).is(a1));
   4430   ASSERT(ToRegister(instr->right()).is(a0));
   4431   StringAddStub stub(isolate(),
   4432                      instr->hydrogen()->flags(),
   4433                      instr->hydrogen()->pretenure_flag());
   4434   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4435 }
   4436 
   4437 
   4438 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4439   class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   4440    public:
   4441     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4442         : LDeferredCode(codegen), instr_(instr) { }
   4443     virtual void Generate() V8_OVERRIDE {
   4444       codegen()->DoDeferredStringCharCodeAt(instr_);
   4445     }
   4446     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4447    private:
   4448     LStringCharCodeAt* instr_;
   4449   };
   4450 
   4451   DeferredStringCharCodeAt* deferred =
   4452       new(zone()) DeferredStringCharCodeAt(this, instr);
   4453   StringCharLoadGenerator::Generate(masm(),
   4454                                     ToRegister(instr->string()),
   4455                                     ToRegister(instr->index()),
   4456                                     ToRegister(instr->result()),
   4457                                     deferred->entry());
   4458   __ bind(deferred->exit());
   4459 }
   4460 
   4461 
   4462 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4463   Register string = ToRegister(instr->string());
   4464   Register result = ToRegister(instr->result());
   4465   Register scratch = scratch0();
   4466 
   4467   // TODO(3095996): Get rid of this. For now, we need to make the
   4468   // result register contain a valid pointer because it is already
   4469   // contained in the register pointer map.
   4470   __ mov(result, zero_reg);
   4471 
   4472   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4473   __ push(string);
   4474   // Push the index as a smi. This is safe because of the checks in
   4475   // DoStringCharCodeAt above.
   4476   if (instr->index()->IsConstantOperand()) {
   4477     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4478     __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
   4479     __ push(scratch);
   4480   } else {
   4481     Register index = ToRegister(instr->index());
   4482     __ SmiTag(index);
   4483     __ push(index);
   4484   }
   4485   CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
   4486                           instr->context());
   4487   __ AssertSmi(v0);
   4488   __ SmiUntag(v0);
   4489   __ StoreToSafepointRegisterSlot(v0, result);
   4490 }
   4491 
   4492 
   4493 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4494   class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   4495    public:
   4496     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4497         : LDeferredCode(codegen), instr_(instr) { }
   4498     virtual void Generate() V8_OVERRIDE {
   4499       codegen()->DoDeferredStringCharFromCode(instr_);
   4500     }
   4501     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4502    private:
   4503     LStringCharFromCode* instr_;
   4504   };
   4505 
   4506   DeferredStringCharFromCode* deferred =
   4507       new(zone()) DeferredStringCharFromCode(this, instr);
   4508 
   4509   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   4510   Register char_code = ToRegister(instr->char_code());
   4511   Register result = ToRegister(instr->result());
   4512   Register scratch = scratch0();
   4513   ASSERT(!char_code.is(result));
   4514 
   4515   __ Branch(deferred->entry(), hi,
   4516             char_code, Operand(String::kMaxOneByteCharCode));
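  // The fast path below probes the single-character string cache: a
  // FixedArray indexed by character code. An entry equal to undefined means
  // the one-character string has not been created yet, in which case we
  // fall back to the deferred runtime call.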
   4517   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4518   __ sll(scratch, char_code, kPointerSizeLog2);
   4519   __ Addu(result, result, scratch);
   4520   __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4521   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   4522   __ Branch(deferred->entry(), eq, result, Operand(scratch));
   4523   __ bind(deferred->exit());
   4524 }
   4525 
   4526 
   4527 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4528   Register char_code = ToRegister(instr->char_code());
   4529   Register result = ToRegister(instr->result());
   4530 
   4531   // TODO(3095996): Get rid of this. For now, we need to make the
   4532   // result register contain a valid pointer because it is already
   4533   // contained in the register pointer map.
   4534   __ mov(result, zero_reg);
   4535 
   4536   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4537   __ SmiTag(char_code);
   4538   __ push(char_code);
   4539   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   4540   __ StoreToSafepointRegisterSlot(v0, result);
   4541 }
   4542 
   4543 
   4544 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4545   LOperand* input = instr->value();
   4546   ASSERT(input->IsRegister() || input->IsStackSlot());
   4547   LOperand* output = instr->result();
   4548   ASSERT(output->IsDoubleRegister());
   4549   FPURegister single_scratch = double_scratch0().low();
   4550   if (input->IsStackSlot()) {
   4551     Register scratch = scratch0();
   4552     __ lw(scratch, ToMemOperand(input));
   4553     __ mtc1(scratch, single_scratch);
   4554   } else {
   4555     __ mtc1(ToRegister(input), single_scratch);
   4556   }
   4557   __ cvt_d_w(ToDoubleRegister(output), single_scratch);
   4558 }
   4559 
   4560 
   4561 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4562   LOperand* input = instr->value();
   4563   LOperand* output = instr->result();
   4564 
   4565   FPURegister dbl_scratch = double_scratch0();
   4566   __ mtc1(ToRegister(input), dbl_scratch);
   4567   __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
   4568 }
   4569 
   4570 
   4571 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4572   class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   4573    public:
   4574     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4575         : LDeferredCode(codegen), instr_(instr) { }
   4576     virtual void Generate() V8_OVERRIDE {
   4577       codegen()->DoDeferredNumberTagIU(instr_,
   4578                                        instr_->value(),
   4579                                        instr_->temp1(),
   4580                                        instr_->temp2(),
   4581                                        SIGNED_INT32);
   4582     }
   4583     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4584    private:
   4585     LNumberTagI* instr_;
   4586   };
   4587 
   4588   Register src = ToRegister(instr->value());
   4589   Register dst = ToRegister(instr->result());
   4590   Register overflow = scratch0();
   4591 
   4592   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
   4593   __ SmiTagCheckOverflow(dst, src, overflow);
   4594   __ BranchOnOverflow(deferred->entry(), overflow);
   4595   __ bind(deferred->exit());
   4596 }
   4597 
   4598 
   4599 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4600   class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   4601    public:
   4602     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4603         : LDeferredCode(codegen), instr_(instr) { }
   4604     virtual void Generate() V8_OVERRIDE {
   4605       codegen()->DoDeferredNumberTagIU(instr_,
   4606                                        instr_->value(),
   4607                                        instr_->temp1(),
   4608                                        instr_->temp2(),
   4609                                        UNSIGNED_INT32);
   4610     }
   4611     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4612    private:
   4613     LNumberTagU* instr_;
   4614   };
   4615 
   4616   Register input = ToRegister(instr->value());
   4617   Register result = ToRegister(instr->result());
   4618 
   4619   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
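  // Smi::kMaxValue is 0x3FFFFFFF on 32-bit targets and the comparison is
  // unsigned (hi), so any uint32 with bit 30 or 31 set -- including values
  // that would be negative if reinterpreted as int32 -- takes the deferred
  // heap-number path instead of being tagged.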
   4620   __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
   4621   __ SmiTag(result, input);
   4622   __ bind(deferred->exit());
   4623 }
   4624 
   4625 
   4626 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
   4627                                      LOperand* value,
   4628                                      LOperand* temp1,
   4629                                      LOperand* temp2,
   4630                                      IntegerSignedness signedness) {
   4631   Label done, slow;
   4632   Register src = ToRegister(value);
   4633   Register dst = ToRegister(instr->result());
   4634   Register tmp1 = scratch0();
   4635   Register tmp2 = ToRegister(temp1);
   4636   Register tmp3 = ToRegister(temp2);
   4637   DoubleRegister dbl_scratch = double_scratch0();
   4638 
   4639   if (signedness == SIGNED_INT32) {
   4640     // There was overflow, so bits 30 and 31 of the original integer
   4641     // disagree. Try to allocate a heap number in new space and store
   4642     // the value in there. If that fails, call the runtime system.
   4643     if (dst.is(src)) {
   4644       __ SmiUntag(src, dst);
   4645       __ Xor(src, src, Operand(0x80000000));
   4646     }
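    // Worked example of the recovery above (illustrative values): tagging
    // src == 0x50000000 overflows, leaving dst == 0xA0000000. SmiUntag
    // (arithmetic shift right by one) gives 0xD0000000, and xoring with
    // 0x80000000 flips the corrupted sign bit back, restoring 0x50000000.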
   4647     __ mtc1(src, dbl_scratch);
   4648     __ cvt_d_w(dbl_scratch, dbl_scratch);
   4649   } else {
   4650     __ mtc1(src, dbl_scratch);
   4651     __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
   4652   }
   4653 
   4654   if (FLAG_inline_new) {
   4655     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4656     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
   4657     __ Branch(&done);
   4658   }
   4659 
   4660   // Slow case: Call the runtime system to do the number allocation.
   4661   __ bind(&slow);
   4662   {
   4663     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4664     // result register is stored, as this register is in the pointer map, but
   4665     // contains an integer value.
   4666     __ mov(dst, zero_reg);
   4667 
   4668     // Preserve the value of all registers.
   4669     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4670 
   4671     // NumberTagI and NumberTagD use the context from the frame, rather than
   4672     // the environment's HContext or HInlinedContext value.
   4673     // They only call Runtime::kHiddenAllocateHeapNumber.
   4674     // The corresponding HChange instructions are added in a phase that does
   4675     // not have easy access to the local context.
   4676     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4677     __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
   4678     RecordSafepointWithRegisters(
   4679         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4680     __ Subu(v0, v0, kHeapObjectTag);
   4681     __ StoreToSafepointRegisterSlot(v0, dst);
   4682   }
   4683 
  // Done. Store the value in dbl_scratch into the value field of the
  // allocated heap number.
   4687   __ bind(&done);
   4688   __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
   4689   __ Addu(dst, dst, kHeapObjectTag);
   4690 }
   4691 
   4692 
   4693 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4694   class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   4695    public:
   4696     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4697         : LDeferredCode(codegen), instr_(instr) { }
   4698     virtual void Generate() V8_OVERRIDE {
   4699       codegen()->DoDeferredNumberTagD(instr_);
   4700     }
   4701     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4702    private:
   4703     LNumberTagD* instr_;
   4704   };
   4705 
   4706   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4707   Register scratch = scratch0();
   4708   Register reg = ToRegister(instr->result());
   4709   Register temp1 = ToRegister(instr->temp());
   4710   Register temp2 = ToRegister(instr->temp2());
   4711 
   4712   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4713   if (FLAG_inline_new) {
   4714     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance: the double value
    // is stored before the object gets tagged.
   4716     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
   4717                           DONT_TAG_RESULT);
   4718   } else {
   4719     __ Branch(deferred->entry());
   4720   }
   4721   __ bind(deferred->exit());
   4722   __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
   4724   __ Addu(reg, reg, kHeapObjectTag);
   4725 }
   4726 
   4727 
   4728 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4729   // TODO(3095996): Get rid of this. For now, we need to make the
   4730   // result register contain a valid pointer because it is already
   4731   // contained in the register pointer map.
   4732   Register reg = ToRegister(instr->result());
   4733   __ mov(reg, zero_reg);
   4734 
   4735   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4736   // NumberTagI and NumberTagD use the context from the frame, rather than
   4737   // the environment's HContext or HInlinedContext value.
   4738   // They only call Runtime::kHiddenAllocateHeapNumber.
   4739   // The corresponding HChange instructions are added in a phase that does
   4740   // not have easy access to the local context.
   4741   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4742   __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
   4743   RecordSafepointWithRegisters(
   4744       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4745   __ Subu(v0, v0, kHeapObjectTag);
   4746   __ StoreToSafepointRegisterSlot(v0, reg);
   4747 }
   4748 
   4749 
   4750 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4751   HChange* hchange = instr->hydrogen();
   4752   Register input = ToRegister(instr->value());
   4753   Register output = ToRegister(instr->result());
   4754   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4755       hchange->value()->CheckFlag(HValue::kUint32)) {
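    // A uint32 fits in a 31-bit smi only if it is at most
    // Smi::kMaxValue == 0x3FFFFFFF, i.e. only if bits 30 and 31 are both
    // clear -- precisely what the 0xc0000000 mask below tests.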
   4756     __ And(at, input, Operand(0xc0000000));
   4757     DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
   4758   }
   4759   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4760       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4761     __ SmiTagCheckOverflow(output, input, at);
   4762     DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
   4763   } else {
   4764     __ SmiTag(output, input);
   4765   }
   4766 }
   4767 
   4768 
   4769 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4770   Register scratch = scratch0();
   4771   Register input = ToRegister(instr->value());
   4772   Register result = ToRegister(instr->result());
   4773   if (instr->needs_check()) {
   4774     STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
   4776     __ And(scratch, input, Operand(kHeapObjectTag));
   4777     __ SmiUntag(result, input);
   4778     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   4779   } else {
   4780     __ SmiUntag(result, input);
   4781   }
   4782 }
   4783 
   4784 
   4785 void LCodeGen::EmitNumberUntagD(Register input_reg,
   4786                                 DoubleRegister result_reg,
   4787                                 bool can_convert_undefined_to_nan,
   4788                                 bool deoptimize_on_minus_zero,
   4789                                 LEnvironment* env,
   4790                                 NumberUntagDMode mode) {
   4791   Register scratch = scratch0();
   4792   Label convert, load_smi, done;
   4793   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4794     // Smi check.
   4795     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4796     // Heap number map check.
   4797     __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4798     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   4799     if (can_convert_undefined_to_nan) {
   4800       __ Branch(&convert, ne, scratch, Operand(at));
   4801     } else {
   4802       DeoptimizeIf(ne, env, scratch, Operand(at));
   4803     }
   4804     // Load heap number.
   4805     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4806     if (deoptimize_on_minus_zero) {
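      // -0.0 is recognized by its bit pattern: the low word is zero (which
      // +0.0 shares, hence the first branch below) and the high word is
      // exactly the sign bit, HeapNumber::kSignMask == 0x80000000.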
   4807       __ mfc1(at, result_reg.low());
   4808       __ Branch(&done, ne, at, Operand(zero_reg));
   4809       __ mfc1(scratch, result_reg.high());
   4810       DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
   4811     }
   4812     __ Branch(&done);
   4813     if (can_convert_undefined_to_nan) {
   4814       __ bind(&convert);
   4815       // Convert undefined (and hole) to NaN.
   4816       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4817       DeoptimizeIf(ne, env, input_reg, Operand(at));
   4818       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4819       __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4820       __ Branch(&done);
   4821     }
   4822   } else {
   4823     __ SmiUntag(scratch, input_reg);
   4824     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   4825   }
  // Smi to double register conversion.
   4827   __ bind(&load_smi);
   4828   // scratch: untagged value of input_reg
   4829   __ mtc1(scratch, result_reg);
   4830   __ cvt_d_w(result_reg, result_reg);
   4831   __ bind(&done);
   4832 }
   4833 
   4834 
   4835 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4836   Register input_reg = ToRegister(instr->value());
   4837   Register scratch1 = scratch0();
   4838   Register scratch2 = ToRegister(instr->temp());
   4839   DoubleRegister double_scratch = double_scratch0();
   4840   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4841 
   4842   ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4843   ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4844 
   4845   Label done;
   4846 
   4847   // The input is a tagged HeapObject.
   4848   // Heap number map check.
   4849   __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4850   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // The heap number map in 'at' and the object's map in scratch1 are used
  // for the checks in both clauses of the if below.
   4853 
   4854   if (instr->truncating()) {
   4855     // Performs a truncating conversion of a floating point number as used by
   4856     // the JS bitwise operations.
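    // For example (standard JS ToInt32 semantics): 4.9 truncates to 4,
    // -4.9 to -4, 2147483648 wraps to -2147483648, and NaN, undefined and
    // the infinities all become 0.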
   4857     Label no_heap_number, check_bools, check_false;
   4858     // Check HeapNumber map.
   4859     __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
   4860     __ mov(scratch2, input_reg);  // In delay slot.
   4861     __ TruncateHeapNumberToI(input_reg, scratch2);
   4862     __ Branch(&done);
   4863 
   4864     // Check for Oddballs. Undefined/False is converted to zero and True to one
   4865     // for truncating conversions.
   4866     __ bind(&no_heap_number);
   4867     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4868     __ Branch(&check_bools, ne, input_reg, Operand(at));
   4869     ASSERT(ToRegister(instr->result()).is(input_reg));
   4870     __ Branch(USE_DELAY_SLOT, &done);
   4871     __ mov(input_reg, zero_reg);  // In delay slot.
   4872 
   4873     __ bind(&check_bools);
   4874     __ LoadRoot(at, Heap::kTrueValueRootIndex);
   4875     __ Branch(&check_false, ne, scratch2, Operand(at));
   4876     __ Branch(USE_DELAY_SLOT, &done);
   4877     __ li(input_reg, Operand(1));  // In delay slot.
   4878 
   4879     __ bind(&check_false);
   4880     __ LoadRoot(at, Heap::kFalseValueRootIndex);
   4881     DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
   4882     __ Branch(USE_DELAY_SLOT, &done);
   4883     __ mov(input_reg, zero_reg);  // In delay slot.
   4884   } else {
   4885     // Deoptimize if we don't have a heap number.
   4886     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
   4887 
   4888     // Load the double value.
   4889     __ ldc1(double_scratch,
   4890             FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4891 
   4892     Register except_flag = scratch2;
   4893     __ EmitFPUTruncate(kRoundToZero,
   4894                        input_reg,
   4895                        double_scratch,
   4896                        scratch1,
   4897                        double_scratch2,
   4898                        except_flag,
   4899                        kCheckForInexactConversion);
   4900 
   4901     // Deopt if the operation did not succeed.
   4902     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   4903 
   4904     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4905       __ Branch(&done, ne, input_reg, Operand(zero_reg));
   4906 
   4907       __ mfc1(scratch1, double_scratch.high());
   4908       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4909       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   4910     }
   4911   }
   4912   __ bind(&done);
   4913 }
   4914 
   4915 
   4916 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4917   class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   4918    public:
   4919     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4920         : LDeferredCode(codegen), instr_(instr) { }
   4921     virtual void Generate() V8_OVERRIDE {
   4922       codegen()->DoDeferredTaggedToI(instr_);
   4923     }
   4924     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4925    private:
   4926     LTaggedToI* instr_;
   4927   };
   4928 
   4929   LOperand* input = instr->value();
   4930   ASSERT(input->IsRegister());
   4931   ASSERT(input->Equals(instr->result()));
   4932 
   4933   Register input_reg = ToRegister(input);
   4934 
   4935   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4936     __ SmiUntag(input_reg);
   4937   } else {
   4938     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4939 
   4940     // Let the deferred code handle the HeapObject case.
   4941     __ JumpIfNotSmi(input_reg, deferred->entry());
   4942 
   4943     // Smi to int32 conversion.
   4944     __ SmiUntag(input_reg);
   4945     __ bind(deferred->exit());
   4946   }
   4947 }
   4948 
   4949 
   4950 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4951   LOperand* input = instr->value();
   4952   ASSERT(input->IsRegister());
   4953   LOperand* result = instr->result();
   4954   ASSERT(result->IsDoubleRegister());
   4955 
   4956   Register input_reg = ToRegister(input);
   4957   DoubleRegister result_reg = ToDoubleRegister(result);
   4958 
   4959   HValue* value = instr->hydrogen()->value();
   4960   NumberUntagDMode mode = value->representation().IsSmi()
   4961       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4962 
   4963   EmitNumberUntagD(input_reg, result_reg,
   4964                    instr->hydrogen()->can_convert_undefined_to_nan(),
   4965                    instr->hydrogen()->deoptimize_on_minus_zero(),
   4966                    instr->environment(),
   4967                    mode);
   4968 }
   4969 
   4970 
   4971 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4972   Register result_reg = ToRegister(instr->result());
   4973   Register scratch1 = scratch0();
   4974   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4975 
   4976   if (instr->truncating()) {
   4977     __ TruncateDoubleToI(result_reg, double_input);
   4978   } else {
   4979     Register except_flag = LCodeGen::scratch1();
   4980 
   4981     __ EmitFPUTruncate(kRoundToMinusInf,
   4982                        result_reg,
   4983                        double_input,
   4984                        scratch1,
   4985                        double_scratch0(),
   4986                        except_flag,
   4987                        kCheckForInexactConversion);
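    // Example (illustrative): converting 3.5 sets the inexact cause bit and
    // converting 1e10 sets the overflow/invalid-operation bits, so
    // except_flag becomes non-zero for every conversion that would lose
    // information.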
   4988 
   4989     // Deopt if the operation did not succeed (except_flag != 0).
   4990     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   4991 
   4992     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4993       Label done;
   4994       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   4995       __ mfc1(scratch1, double_input.high());
   4996       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4997       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   4998       __ bind(&done);
   4999     }
   5000   }
   5001 }
   5002 
   5003 
   5004 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   5005   Register result_reg = ToRegister(instr->result());
   5006   Register scratch1 = LCodeGen::scratch0();
   5007   DoubleRegister double_input = ToDoubleRegister(instr->value());
   5008 
   5009   if (instr->truncating()) {
   5010     __ TruncateDoubleToI(result_reg, double_input);
   5011   } else {
   5012     Register except_flag = LCodeGen::scratch1();
   5013 
   5014     __ EmitFPUTruncate(kRoundToMinusInf,
   5015                        result_reg,
   5016                        double_input,
   5017                        scratch1,
   5018                        double_scratch0(),
   5019                        except_flag,
   5020                        kCheckForInexactConversion);
   5021 
   5022     // Deopt if the operation did not succeed (except_flag != 0).
   5023     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   5024 
   5025     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5026       Label done;
   5027       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   5028       __ mfc1(scratch1, double_input.high());
   5029       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   5030       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   5031       __ bind(&done);
   5032     }
   5033   }
   5034   __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
   5035   DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
   5036 }
   5037 
   5038 
   5039 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   5040   LOperand* input = instr->value();
   5041   __ SmiTst(ToRegister(input), at);
   5042   DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
   5043 }
   5044 
   5045 
   5046 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   5047   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   5048     LOperand* input = instr->value();
   5049     __ SmiTst(ToRegister(input), at);
   5050     DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
   5051   }
   5052 }
   5053 
   5054 
   5055 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   5056   Register input = ToRegister(instr->value());
   5057   Register scratch = scratch0();
   5058 
   5059   __ GetObjectType(input, scratch, scratch);
   5060 
   5061   if (instr->hydrogen()->is_interval_check()) {
   5062     InstanceType first;
   5063     InstanceType last;
   5064     instr->hydrogen()->GetCheckInterval(&first, &last);
   5065 
    // If there is only one type in the interval, check for equality.
   5067     if (first == last) {
   5068       DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
   5069     } else {
   5070       DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit the upper-bound check if the interval ends at the last type.
   5072       if (last != LAST_TYPE) {
   5073         DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
   5074       }
   5075     }
   5076   } else {
   5077     uint8_t mask;
   5078     uint8_t tag;
   5079     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
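    // Illustrative case (assuming the usual instance-type encoding, e.g.
    // kIsNotStringMask == 0x80 with kStringTag == 0): an "is string" check
    // is a single-bit test, so the power-of-two fast path below only needs
    // an And and a comparison against zero.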
   5080 
   5081     if (IsPowerOf2(mask)) {
   5082       ASSERT(tag == 0 || IsPowerOf2(tag));
   5083       __ And(at, scratch, mask);
   5084       DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
   5085           at, Operand(zero_reg));
   5086     } else {
   5087       __ And(scratch, scratch, Operand(mask));
   5088       DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
   5089     }
   5090   }
   5091 }
   5092 
   5093 
   5094 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   5095   Register reg = ToRegister(instr->value());
   5096   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   5097   AllowDeferredHandleDereference smi_check;
   5098   if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg, Operand(object));
   5108   }
   5109 }
   5110 
   5111 
   5112 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5113   {
   5114     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   5115     __ push(object);
   5116     __ mov(cp, zero_reg);
   5117     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   5118     RecordSafepointWithRegisters(
   5119         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   5120     __ StoreToSafepointRegisterSlot(v0, scratch0());
   5121   }
   5122   __ SmiTst(scratch0(), at);
   5123   DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
   5124 }
   5125 
   5126 
   5127 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5128   class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   5129    public:
   5130     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5131         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5132       SetExit(check_maps());
   5133     }
   5134     virtual void Generate() V8_OVERRIDE {
   5135       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5136     }
   5137     Label* check_maps() { return &check_maps_; }
   5138     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   5139    private:
   5140     LCheckMaps* instr_;
   5141     Label check_maps_;
   5142     Register object_;
   5143   };
   5144 
   5145   if (instr->hydrogen()->IsStabilityCheck()) {
   5146     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5147     for (int i = 0; i < maps->size(); ++i) {
   5148       AddStabilityDependency(maps->at(i).handle());
   5149     }
   5150     return;
   5151   }
   5152 
   5153   Register map_reg = scratch0();
   5154   LOperand* input = instr->value();
   5155   ASSERT(input->IsRegister());
   5156   Register reg = ToRegister(input);
   5157   __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   5158 
   5159   DeferredCheckMaps* deferred = NULL;
   5160   if (instr->hydrogen()->HasMigrationTarget()) {
   5161     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5162     __ bind(deferred->check_maps());
   5163   }
   5164 
   5165   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5166   Label success;
   5167   for (int i = 0; i < maps->size() - 1; i++) {
   5168     Handle<Map> map = maps->at(i).handle();
   5169     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
   5170   }
   5171   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5172   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
   5173   if (instr->hydrogen()->HasMigrationTarget()) {
   5174     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
   5175   } else {
   5176     DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
   5177   }
   5178 
   5179   __ bind(&success);
   5180 }
   5181 
   5182 
   5183 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5184   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5185   Register result_reg = ToRegister(instr->result());
   5186   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5187   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
   5188 }
   5189 
   5190 
   5191 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5192   Register unclamped_reg = ToRegister(instr->unclamped());
   5193   Register result_reg = ToRegister(instr->result());
   5194   __ ClampUint8(result_reg, unclamped_reg);
   5195 }
   5196 
   5197 
   5198 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5199   Register scratch = scratch0();
   5200   Register input_reg = ToRegister(instr->unclamped());
   5201   Register result_reg = ToRegister(instr->result());
   5202   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5203   Label is_smi, done, heap_number;
   5204 
   5205   // Both smi and heap number cases are handled.
   5206   __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
   5207 
  // Check for heap number.
   5209   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5210   __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
   5211 
   5212   // Check for undefined. Undefined is converted to zero for clamping
   5213   // conversions.
   5214   DeoptimizeIf(ne, instr->environment(), input_reg,
   5215                Operand(factory()->undefined_value()));
   5216   __ mov(result_reg, zero_reg);
   5217   __ jmp(&done);
   5218 
   5219   // Heap number
   5220   __ bind(&heap_number);
   5221   __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
   5222                                              HeapNumber::kValueOffset));
   5223   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
   5224   __ jmp(&done);
   5225 
   5226   __ bind(&is_smi);
   5227   __ ClampUint8(result_reg, scratch);
   5228 
   5229   __ bind(&done);
   5230 }
   5231 
   5232 
   5233 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   5234   DoubleRegister value_reg = ToDoubleRegister(instr->value());
   5235   Register result_reg = ToRegister(instr->result());
   5236   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
   5237     __ FmoveHigh(result_reg, value_reg);
   5238   } else {
   5239     __ FmoveLow(result_reg, value_reg);
   5240   }
   5241 }
   5242 
   5243 
   5244 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
   5245   Register hi_reg = ToRegister(instr->hi());
   5246   Register lo_reg = ToRegister(instr->lo());
   5247   DoubleRegister result_reg = ToDoubleRegister(instr->result());
   5248   __ Move(result_reg, lo_reg, hi_reg);
   5249 }
   5250 
   5251 
   5252 void LCodeGen::DoAllocate(LAllocate* instr) {
   5253   class DeferredAllocate V8_FINAL : public LDeferredCode {
   5254    public:
   5255     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5256         : LDeferredCode(codegen), instr_(instr) { }
   5257     virtual void Generate() V8_OVERRIDE {
   5258       codegen()->DoDeferredAllocate(instr_);
   5259     }
   5260     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   5261    private:
   5262     LAllocate* instr_;
   5263   };
   5264 
   5265   DeferredAllocate* deferred =
   5266       new(zone()) DeferredAllocate(this, instr);
   5267 
   5268   Register result = ToRegister(instr->result());
   5269   Register scratch = ToRegister(instr->temp1());
   5270   Register scratch2 = ToRegister(instr->temp2());
   5271 
   5272   // Allocate memory for the object.
   5273   AllocationFlags flags = TAG_OBJECT;
   5274   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5275     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5276   }
   5277   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5278     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5279     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5280     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   5281   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5282     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5283     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   5284   }
   5285   if (instr->size()->IsConstantOperand()) {
   5286     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5287     if (size <= Page::kMaxRegularHeapObjectSize) {
   5288       __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5289     } else {
   5290       __ jmp(deferred->entry());
   5291     }
   5292   } else {
   5293     Register size = ToRegister(instr->size());
   5294     __ Allocate(size,
   5295                 result,
   5296                 scratch,
   5297                 scratch2,
   5298                 deferred->entry(),
   5299                 flags);
   5300   }
   5301 
   5302   __ bind(deferred->exit());
   5303 
   5304   if (instr->hydrogen()->MustPrefillWithFiller()) {
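    // Prefilling (our reading of the intent): the object must look like a
    // walkable heap object at any GC-safe point before its real fields are
    // initialized, so it is covered with one-pointer filler maps. The loop
    // below writes them from the highest word down to offset zero.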
   5305     if (instr->size()->IsConstantOperand()) {
   5306       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5307       __ li(scratch, Operand(size));
   5308     } else {
   5309       scratch = ToRegister(instr->size());
   5310     }
   5311     __ Subu(scratch, scratch, Operand(kPointerSize));
   5312     __ Subu(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
    __ Addu(result, result, Operand(kHeapObjectTag));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime, so abort.
      __ stop("invalid allocation size");
      return;
    }
  }

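  // Encode the double-alignment requirement and the target space into a
  // smi of flags; the runtime call below takes (size, flags) as arguments.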
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ li(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create the regexp literal using the runtime function.
  // The result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (The copy loop is unrolled once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
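  // If the object size is an odd number of words, the unrolled loop above
  // stops one word short; copy the final word separately.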
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast-case closure allocation code, which allocates in new
  // space, for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

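  // EmitTypeofIs emits the type check and fills cmp1/cmp2 with the operands
  // the final branch should compare; kNoCondition means the type literal was
  // not recognized and control has already branched to the false label.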
  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function makes heavy use of branch delay slots, using them to load
  // values that are always usable regardless of the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object, so loading the map's bit field is safe even if we
    // take the other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And; it is safe to execute in
    // the delay slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
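    // JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE are the only two callable
    // spec object types, so typeof answers "function" for exactly these two.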
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    *cmp1 = input;
    *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // The map register is still valid, so the bit field can be loaded in the
    // delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
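  // Deoptimize if the object to enumerate is undefined or null; these
  // degenerate for-in cases are left to the unoptimized code.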
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

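  // Deoptimize for JS proxies and for anything else below the spec object
  // range; since FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE, a single
  // comparison against LAST_JS_PROXY_TYPE covers both cases.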
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

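  // The runtime returns the object's map when the enum cache is usable;
  // anything else (e.g. a fixed array of property names) forces a deopt,
  // detected below by comparing the result's map against the meta map.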
  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
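  // An enum cache length of zero means there is nothing to enumerate:
  // produce the empty fixed array instead of loading a cache.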
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(object, index);
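  // The runtime function below does not appear to require a JS context, so
  // cp is simply cleared before the call.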
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
     instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

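  // The low bit of the index smi's payload flags a mutable heap-number
  // field, which must be loaded through the deferred runtime call. Once the
  // flag bit is shifted out, the remaining value is the field index as a
  // smi, negative for out-of-object properties.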
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ sra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal