// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-codegen-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  RegisterDependentCodeForEmbeddedMaps(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered caller-saved double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller-saved double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). t1 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ Branch(&ok, eq, t1, Operand(zero_reg));

      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
      __ sw(a2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ push(a0);
      __ push(a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      // The two pushes above moved sp down by 2 * kPointerSize, so the
      // stores are offset by that amount to land on the reserved slots.
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ pop(a1);
      __ pop(a0);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp.  It replaces the context
    // passed to us.  It's saved on the stack and kept live in cp.
    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    if (deopt_jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        __ Call(t9);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ Call(t9);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
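
// Illustrative note, assuming V8's 32-bit Smi encoding (tag bit 0,
// kSmiTagSize == 1): Smi::FromInt(value) is simply value << 1, so the
// reinterpret_cast above returns the raw bit pattern value * 2.  For
// example, the constant 21 in a Smi representation comes back as 42
// (0x2A), which is the pattern the generated code actually compares.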


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}
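
// For illustration: parameter slots use negative indices, and the formula
// maps index -1 to offset 0 and index -2 to kPointerSize.  With no eager
// frame, ToMemOperand below therefore addresses them as MemOperand(sp, 0)
// and MemOperand(sp, 4) on MIPS32 (kPointerSize == 4).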


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // No eager frame has been built; retrieve the parameter relative to
    // the stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // No eager frame has been built; retrieve the parameter relative to
    // the stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to handle a condition, build the
  // frame, or restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);
  } else {
    const Register scratch = scratch0();
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // div runs in the background while we check for special cases.
    Register right_reg = EmitLoadRegister(instr->right(), scratch);
    __ div(left_reg, right_reg);

    Label done;
    // Check for x % 0; we have to deopt in this case because we can't
    // return a NaN.
    if (right->CanBeZero()) {
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
    }

    // Check for kMinInt % -1; we have to deopt if we care about -0,
    // because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
      // TODO(svenpanne) Don't deopt when we don't care about -0.
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
      __ bind(&left_not_min_int);
    }

    // TODO(svenpanne) Only emit the test/deopt if we have to.
    __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
    __ mfhi(result_reg);

    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
    }
    __ bind(&done);
  }
}
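
// Worked example for the power-of-2 path above (illustrative): for x % 8
// the mask is divisor - 1 == 7.  A non-negative dividend needs only the
// single And: 13 % 8 -> 13 & 7 == 5.  A negative dividend is negated,
// masked, and negated again, so the result keeps the dividend's sign as
// required by JavaScript's truncating %: -13 % 8 -> -(13 & 7) == -5.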


void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, at, no_reg));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
    CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
        DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
      }
      // Compute the remainder.
      __ Move(remainder, zero_reg);
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch- and condition-free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ sra(scratch, dividend, power - 1);
          __ srl(scratch, scratch, 32 - power);
        } else {
          // For power == 1 the arithmetic shift would be a no-op; take the
          // rounding bias straight from the dividend so that scratch is
          // never read uninitialized.
          __ srl(scratch, dividend, 32 - power);
        }
        __ Addu(scratch, dividend, Operand(scratch));
        __ sra(result, scratch, power);
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ Subu(result, zero_reg, Operand(result));
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sll(scratch, result, power);
          __ Subu(remainder, dividend, Operand(scratch));
        } else {
          __ sll(scratch, result, power);
          __ Addu(remainder, dividend, Operand(scratch));
        }
        return;
      } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ li(scratch, Operand(M));
        __ mult(dividend, scratch);
        __ mfhi(scratch);
        if (M < 0) {
          __ Addu(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ sra(scratch, scratch, s);
        }
        __ srl(at, dividend, 31);
        __ Addu(result, scratch, Operand(at));
        if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
        // Compute the remainder.
        __ li(scratch, Operand(divisor));
        __ Mul(scratch, result, Operand(scratch));
        __ Subu(remainder, dividend, Operand(scratch));
      } else {
        __ li(scratch, Operand(divisor));
        __ div(dividend, scratch);
        __ mfhi(remainder);
        __ mflo(result);
      }
  }
}
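
// Worked example for the magic-number path above (constants from
// Hacker's Delight; shown for illustration only): for divisor 3 the
// table yields M = 0x55555556 and s = 0, so the emitted sequence
// computes q = hi32(M * n) + (n >> 31).  For n = 100 this gives
// hi32(0x55555556 * 100) == 33 plus a sign correction of 0, i.e.
// q == 33 == 100 / 3 truncated; the remainder 100 - 33 * 3 == 1 is
// then recovered by the trailing Mul/Subu pair.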


void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mfhi(result);
    DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  }
  __ mflo(result);
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (instr->right()->IsConstantOperand()) {
    Label done;
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    if (divisor < 0) {
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch, remainder, Operand(divisor));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  } else {
    Label done;
    const Register right = ToRegister(instr->right());

    // On MIPS div is asynchronous - it will run in the background while we
    // check for special cases.
    __ div(left, right);

    // Check for x / 0.
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label left_not_zero;
      __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
      DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
      __ bind(&left_not_zero);
    }

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
      DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
      __ bind(&left_not_min_int);
    }

    __ mfhi(remainder);
    __ mflo(result);

    // We performed a truncating division. Correct the result if necessary.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch, remainder, Operand(right));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  }
}
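
// Worked example for the correction above (illustrative): MIPS div
// truncates toward zero, while MathFloorOfDiv must round toward minus
// infinity.  For -7 / 2 the truncating quotient is -3 with remainder -1;
// remainder and divisor have opposite signs, so remainder ^ divisor is
// negative and the quotient is adjusted to -4 == floor(-3.5).  For 7 / 2
// the signs agree, the Xor is non-negative, and no adjustment happens.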


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should
      // be -0.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
   1392         int32_t mask = constant >> 31;
   1393         uint32_t constant_abs = (constant + mask) ^ mask;
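        // Branchless absolute value: mask is all ones if the constant is
        // negative and zero otherwise, so (constant + mask) ^ mask yields
        // -constant for negative inputs and constant for non-negative ones.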

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(scratch, left, shift);
          __ Addu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mult(result, right);
        __ mfhi(scratch);
        __ mflo(result);
      } else {
        __ mult(left, right);
        __ mfhi(scratch);
        __ mflo(result);
      }
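      // No overflow occurred iff the upper 32 bits of the product (hi, in
      // scratch) equal the sign extension of the lower 32 bits (lo, in
      // result); sra computes that sign extension for the comparison.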
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
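      // If the operands have the same sign, their XOR is non-negative and
      // the product cannot be -0, so the check can be skipped.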
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq,
                   instr->environment(),
                   result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
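      // x ^ -1 is ~x, which MIPS expresses as a nor against zero_reg.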
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS; masking is built into the
    // variable shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // The shift count is a constant; mask it to the low five bits.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
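            // The operand is a tagged smi, i.e. the value already shifted
            // left by one. Shifting it by shift_count - 1 therefore produces
            // the untagged result; re-tagging with an overflow check yields
            // the final smi.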
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // The overflow check macros do not support constant operands, so the
      // IsConstantOperand case is handled by the previous clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
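    // The overflow macros leave a negative value in 'overflow' iff the
    // operation overflowed, so a single signed check triggers the bailout.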
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ li(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi, return the object.
    __ Move(result, input);
    __ JumpIfSmi(input, &done);
  }

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ SmiTst(object, at);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
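      // Fields below kFirstUncachedField are cached on the date object.
      // Compare the object's cache stamp against the isolate's date cache
      // stamp; if they match the cached field is valid, otherwise fall
      // through to the runtime call below.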
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  ASSERT(!scratch.is(string));
  ASSERT(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Addu(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ sll(scratch, ToRegister(index), 1);
    __ Addu(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ sb(value, operand);
  } else {
    __ sh(value, operand);
  }
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToRegister(instr->value()));
  ASSERT(ToRegister(instr->context()).is(cp));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // The overflow check macros do not support constant operands, so the
      // IsConstantOperand case is handled by the previous clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, at));
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    if (!result_reg.is(left_reg)) {
      __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
      __ mov(result_reg, left_reg);
      __ Branch(&done);
    }
    __ Branch(&done, condition, left_reg, right_op);
    __ bind(&return_right);
    __ Addu(result_reg, zero_reg, right_op);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
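    // For min, compute -((-L) - R); for max, compute L + R. With IEEE
    // signed zeros the former yields -0 whenever either input is -0 and
    // the latter yields +0 whenever either input is +0, which is exactly
    // the required min/max behavior.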
    if (operation == HMathMinMax::kMathMin) {
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      __ add_d(result_reg, left_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // left == NaN.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result into the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore the saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(a1));
  ASSERT(ToRegister(instr->right()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker
  // instruction (andi zero_reg) never appears in normal code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
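  // Fall-through optimization: if one of the destination blocks is emitted
  // right after this instruction, branch only for the other destination and
  // fall through; otherwise emit a conditional and an unconditional branch.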
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr,
                               Condition condition,
                               Register src1,
                               const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, nue, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all others -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg, at);
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

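  // In unboxed double representation the hole is a NaN with a distinguished
  // bit pattern. A value that compares equal to itself is not a NaN and thus
  // cannot be the hole; for actual NaNs the upper word identifies the hole.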
  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());
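  // -0 is the double with the sign bit set and all other bits clear, i.e.
  // upper (exponent) word 0x80000000 and lower (mantissa) word 0; both
  // paths below check for exactly that bit pattern.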

  if (rep.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
    __ FmoveHigh(scratch, value);
    __ li(at, 0x80000000);
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value,
                scratch,
                Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()),
                DO_SMI_CHECK);
    __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
    __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    __ mov(at, zero_reg);
  }
  EmitBranch(instr, eq, scratch, Operand(at));
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

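  // The caller completes the range check by comparing temp2 (the instance
  // type) against LAST_NONCALLABLE_SPEC_OBJECT_TYPE using the returned
  // condition.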
  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = scratch0();

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition, v0, Operand(zero_reg));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lw(scratch,
        FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


// Branches to a label or falls through, leaving the final class-name
// comparison to the caller (MIPS has no flags register).  Trashes the
// temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label true_label, done;
  ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

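  // A zero result from the stub means "is an instance"; translate it into
  // the true/false heap objects expected by the caller.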
  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // A string value is not an instance of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // At this point result holds either the true or the false object. The
  // deferred code also produces a true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


   2701 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   2702                                                Label* map_check) {
   2703   Register result = ToRegister(instr->result());
   2704   ASSERT(result.is(v0));
   2705 
   2706   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   2707   flags = static_cast<InstanceofStub::Flags>(
   2708       flags | InstanceofStub::kArgsInRegisters);
   2709   flags = static_cast<InstanceofStub::Flags>(
   2710       flags | InstanceofStub::kCallSiteInlineCheck);
   2711   flags = static_cast<InstanceofStub::Flags>(
   2712       flags | InstanceofStub::kReturnTrueFalseObject);
   2713   InstanceofStub stub(flags);
   2714 
   2715   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   2716   LoadContextFromDeferred(instr->context());
   2717 
    2718   // Get the temp register reserved by the instruction. This needs to be t0,
    2719   // as its slot in the pushed safepoint register block is used to communicate
    2720   // the offset to the location of the map check.
   2721   Register temp = ToRegister(instr->temp());
   2722   ASSERT(temp.is(t0));
   2723   __ li(InstanceofStub::right(), instr->function());
   2724   static const int kAdditionalDelta = 7;
   2725   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   2726   Label before_push_delta;
   2727   __ bind(&before_push_delta);
   2728   {
   2729     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   2730     __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
   2731     __ StoreToSafepointRegisterSlot(temp, temp);
   2732   }
   2733   CallCodeGeneric(stub.GetCode(isolate()),
   2734                   RelocInfo::CODE_TARGET,
   2735                   instr,
   2736                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   2737   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   2738   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2739   // Put the result value into the result register slot and
   2740   // restore all registers.
   2741   __ StoreToSafepointRegisterSlot(result, result);
   2742 }
   2743 
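         // The stub finds the patch site relative to its return address: the
         // distance back to the map check is passed in t0's safepoint slot as
         // delta * kPointerSize bytes, where kAdditionalDelta accounts for the
         // instructions emitted between before_push_delta and the call itself.
         // For example, 20 instructions generated since map_check would store
         // (20 + 7) * 4 bytes on MIPS32.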
   2744 
   2745 void LCodeGen::DoCmpT(LCmpT* instr) {
   2746   ASSERT(ToRegister(instr->context()).is(cp));
   2747   Token::Value op = instr->op();
   2748 
   2749   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2750   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2751   // On MIPS there is no need for a "no inlined smi code" marker (nop).
   2752 
   2753   Condition condition = ComputeCompareCondition(op);
   2754   // A minor optimization that relies on LoadRoot always emitting one
   2755   // instruction.
   2756   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
   2757   Label done, check;
   2758   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
   2759   __ bind(&check);
   2760   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   2761   ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
   2762   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   2763   __ bind(&done);
   2764 }
   2765 
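         // The branch above makes use of its delay slot: the "true" LoadRoot
         // executes in the slot, so a taken branch leaves true in the result
         // register, while the fall-through overwrites it with false. The
         // ASSERT_EQ guards this by checking that LoadRoot emitted exactly one
         // instruction.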
   2766 
   2767 void LCodeGen::DoReturn(LReturn* instr) {
   2768   if (FLAG_trace && info()->IsOptimizing()) {
   2769     // Push the return value on the stack as the parameter.
   2770     // Runtime::TraceExit returns its parameter in v0. We're leaving the code
    2771     // managed by the register allocator and tearing down the frame, so it's
   2772     // safe to write to the context register.
   2773     __ push(v0);
   2774     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2775     __ CallRuntime(Runtime::kTraceExit, 1);
   2776   }
   2777   if (info()->saves_caller_doubles()) {
   2778     RestoreCallerDoubles();
   2779   }
   2780   int no_frame_start = -1;
   2781   if (NeedsEagerFrame()) {
   2782     __ mov(sp, fp);
   2783     no_frame_start = masm_->pc_offset();
   2784     __ Pop(ra, fp);
   2785   }
   2786   if (instr->has_constant_parameter_count()) {
   2787     int parameter_count = ToInteger32(instr->constant_parameter_count());
   2788     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
   2789     if (sp_delta != 0) {
   2790       __ Addu(sp, sp, Operand(sp_delta));
   2791     }
   2792   } else {
   2793     Register reg = ToRegister(instr->parameter_count());
    2794     // The argument count parameter is a smi.
   2795     __ SmiUntag(reg);
   2796     __ sll(at, reg, kPointerSizeLog2);
   2797     __ Addu(sp, sp, at);
   2798   }
   2799 
   2800   __ Jump(ra);
   2801 
   2802   if (no_frame_start != -1) {
   2803     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   2804   }
   2805 }
   2806 
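         // Frame teardown arithmetic, for reference: with a constant parameter
         // count the stack is popped by (parameter_count + 1) * kPointerSize
         // bytes, the +1 being the receiver; e.g. two declared parameters on
         // MIPS32 (kPointerSize == 4) add 12 to sp. A dynamic count arrives as
         // a smi and is untagged, then scaled by kPointerSizeLog2 instead.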
   2807 
   2808 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   2809   Register result = ToRegister(instr->result());
   2810   __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   2811   __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
   2812   if (instr->hydrogen()->RequiresHoleCheck()) {
   2813     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2814     DeoptimizeIf(eq, instr->environment(), result, Operand(at));
   2815   }
   2816 }
   2817 
   2818 
   2819 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   2820   ASSERT(ToRegister(instr->context()).is(cp));
   2821   ASSERT(ToRegister(instr->global_object()).is(a0));
   2822   ASSERT(ToRegister(instr->result()).is(v0));
   2823 
   2824   __ li(a2, Operand(instr->name()));
   2825   RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
   2826                                              : RelocInfo::CODE_TARGET_CONTEXT;
   2827   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   2828   CallCode(ic, mode, instr);
   2829 }
   2830 
   2831 
   2832 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   2833   Register value = ToRegister(instr->value());
   2834   Register cell = scratch0();
   2835 
   2836   // Load the cell.
   2837   __ li(cell, Operand(instr->hydrogen()->cell().handle()));
   2838 
    2839   // If the cell we are storing to contains the hole, it could have
   2840   // been deleted from the property dictionary. In that case, we need
   2841   // to update the property details in the property dictionary to mark
   2842   // it as no longer deleted.
   2843   if (instr->hydrogen()->RequiresHoleCheck()) {
   2844     // We use a temp to check the payload.
   2845     Register payload = ToRegister(instr->temp());
   2846     __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
   2847     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2848     DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
   2849   }
   2850 
   2851   // Store the value.
   2852   __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
   2853   // Cells are always rescanned, so no write barrier here.
   2854 }
   2855 
   2856 
   2857 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
   2858   ASSERT(ToRegister(instr->context()).is(cp));
   2859   ASSERT(ToRegister(instr->global_object()).is(a1));
   2860   ASSERT(ToRegister(instr->value()).is(a0));
   2861 
   2862   __ li(a2, Operand(instr->name()));
   2863   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   2864       ? isolate()->builtins()->StoreIC_Initialize_Strict()
   2865       : isolate()->builtins()->StoreIC_Initialize();
   2866   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
   2867 }
   2868 
   2869 
   2870 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2871   Register context = ToRegister(instr->context());
   2872   Register result = ToRegister(instr->result());
   2873 
   2874   __ lw(result, ContextOperand(context, instr->slot_index()));
   2875   if (instr->hydrogen()->RequiresHoleCheck()) {
   2876     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2877 
   2878     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2879       DeoptimizeIf(eq, instr->environment(), result, Operand(at));
   2880     } else {
   2881       Label is_not_hole;
   2882       __ Branch(&is_not_hole, ne, result, Operand(at));
   2883       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2884       __ bind(&is_not_hole);
   2885     }
   2886   }
   2887 }
   2888 
   2889 
   2890 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2891   Register context = ToRegister(instr->context());
   2892   Register value = ToRegister(instr->value());
   2893   Register scratch = scratch0();
   2894   MemOperand target = ContextOperand(context, instr->slot_index());
   2895 
   2896   Label skip_assignment;
   2897 
   2898   if (instr->hydrogen()->RequiresHoleCheck()) {
   2899     __ lw(scratch, target);
   2900     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2901 
   2902     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2903       DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
   2904     } else {
   2905       __ Branch(&skip_assignment, ne, scratch, Operand(at));
   2906     }
   2907   }
   2908 
   2909   __ sw(value, target);
   2910   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2911     SmiCheck check_needed =
   2912         instr->hydrogen()->value()->IsHeapObject()
   2913             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2914     __ RecordWriteContextSlot(context,
   2915                               target.offset(),
   2916                               value,
   2917                               scratch0(),
   2918                               GetRAState(),
   2919                               kSaveFPRegs,
   2920                               EMIT_REMEMBERED_SET,
   2921                               check_needed);
   2922   }
   2923 
   2924   __ bind(&skip_assignment);
   2925 }
   2926 
   2927 
   2928 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2929   HObjectAccess access = instr->hydrogen()->access();
   2930   int offset = access.offset();
   2931   Register object = ToRegister(instr->object());
   2932 
   2933   if (access.IsExternalMemory()) {
   2934     Register result = ToRegister(instr->result());
   2935     MemOperand operand = MemOperand(object, offset);
   2936     __ Load(result, operand, access.representation());
   2937     return;
   2938   }
   2939 
   2940   if (instr->hydrogen()->representation().IsDouble()) {
   2941     DoubleRegister result = ToDoubleRegister(instr->result());
   2942     __ ldc1(result, FieldMemOperand(object, offset));
   2943     return;
   2944   }
   2945 
   2946   Register result = ToRegister(instr->result());
   2947   if (!access.IsInobject()) {
   2948     __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   2949     object = result;
   2950   }
   2951   MemOperand operand = FieldMemOperand(object, offset);
   2952   __ Load(result, operand, access.representation());
   2953 }
   2954 
   2955 
   2956 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   2957   ASSERT(ToRegister(instr->context()).is(cp));
   2958   ASSERT(ToRegister(instr->object()).is(a0));
   2959   ASSERT(ToRegister(instr->result()).is(v0));
   2960 
   2961   // Name is always in a2.
   2962   __ li(a2, Operand(instr->name()));
   2963   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   2964   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2965 }
   2966 
   2967 
   2968 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2969   Register scratch = scratch0();
   2970   Register function = ToRegister(instr->function());
   2971   Register result = ToRegister(instr->result());
   2972 
   2973   // Check that the function really is a function. Load map into the
   2974   // result register.
   2975   __ GetObjectType(function, result, scratch);
   2976   DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
   2977 
   2978   // Make sure that the function has an instance prototype.
   2979   Label non_instance;
   2980   __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
   2981   __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
   2982   __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
   2983 
   2984   // Get the prototype or initial map from the function.
   2985   __ lw(result,
   2986          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2987 
   2988   // Check that the function has a prototype or an initial map.
   2989   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   2990   DeoptimizeIf(eq, instr->environment(), result, Operand(at));
   2991 
   2992   // If the function does not have an initial map, we're done.
   2993   Label done;
   2994   __ GetObjectType(result, scratch, scratch);
   2995   __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
   2996 
   2997   // Get the prototype from the initial map.
   2998   __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2999   __ Branch(&done);
   3000 
   3001   // Non-instance prototype: Fetch prototype from constructor field
   3002   // in initial map.
   3003   __ bind(&non_instance);
   3004   __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
   3005 
   3006   // All done.
   3007   __ bind(&done);
   3008 }
   3009 
   3010 
   3011 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   3012   Register result = ToRegister(instr->result());
   3013   __ LoadRoot(result, instr->index());
   3014 }
   3015 
   3016 
   3017 void LCodeGen::DoLoadExternalArrayPointer(
   3018     LLoadExternalArrayPointer* instr) {
   3019   Register to_reg = ToRegister(instr->result());
   3020   Register from_reg  = ToRegister(instr->object());
   3021   __ lw(to_reg, FieldMemOperand(from_reg,
   3022                                 ExternalArray::kExternalPointerOffset));
   3023 }
   3024 
   3025 
   3026 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   3027   Register arguments = ToRegister(instr->arguments());
   3028   Register result = ToRegister(instr->result());
   3029   // There are two words between the frame pointer and the last argument.
    3030   // Subtracting from length accounts for one of them; add one more.
   3031   if (instr->length()->IsConstantOperand()) {
   3032     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   3033     if (instr->index()->IsConstantOperand()) {
   3034       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3035       int index = (const_length - const_index) + 1;
   3036       __ lw(result, MemOperand(arguments, index * kPointerSize));
   3037     } else {
   3038       Register index = ToRegister(instr->index());
   3039       __ li(at, Operand(const_length + 1));
   3040       __ Subu(result, at, index);
   3041       __ sll(at, result, kPointerSizeLog2);
   3042       __ Addu(at, arguments, at);
   3043       __ lw(result, MemOperand(at));
   3044     }
   3045   } else if (instr->index()->IsConstantOperand()) {
   3046     Register length = ToRegister(instr->length());
   3047     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3048     int loc = const_index - 1;
   3049     if (loc != 0) {
   3050       __ Subu(result, length, Operand(loc));
   3051       __ sll(at, result, kPointerSizeLog2);
   3052       __ Addu(at, arguments, at);
   3053       __ lw(result, MemOperand(at));
   3054     } else {
   3055       __ sll(at, length, kPointerSizeLog2);
   3056       __ Addu(at, arguments, at);
   3057       __ lw(result, MemOperand(at));
   3058     }
   3059   } else {
   3060     Register length = ToRegister(instr->length());
   3061     Register index = ToRegister(instr->index());
   3062     __ Subu(result, length, index);
   3063     __ Addu(result, result, 1);
   3064     __ sll(at, result, kPointerSizeLog2);
   3065     __ Addu(at, arguments, at);
   3066     __ lw(result, MemOperand(at));
   3067   }
   3068 }
   3069 
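         // Index arithmetic, for reference: the slot for argument i sits
         // (length - i) + 1 words above the arguments pointer, the +1 covering
         // the second of the two words mentioned above. E.g. with length == 3
         // and index == 0 the element is read from
         // MemOperand(arguments, 4 * kPointerSize).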
   3070 
   3071 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   3072   Register external_pointer = ToRegister(instr->elements());
   3073   Register key = no_reg;
   3074   ElementsKind elements_kind = instr->elements_kind();
   3075   bool key_is_constant = instr->key()->IsConstantOperand();
   3076   int constant_key = 0;
   3077   if (key_is_constant) {
   3078     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3079     if (constant_key & 0xF0000000) {
   3080       Abort(kArrayIndexConstantValueTooBig);
   3081     }
   3082   } else {
   3083     key = ToRegister(instr->key());
   3084   }
   3085   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3086   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3087       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3088   int additional_offset = instr->additional_index() << element_size_shift;
   3089 
   3090   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
   3091       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   3092     FPURegister result = ToDoubleRegister(instr->result());
   3093     if (key_is_constant) {
   3094       __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
   3095     } else {
   3096       __ sll(scratch0(), key, shift_size);
   3097       __ Addu(scratch0(), scratch0(), external_pointer);
   3098     }
   3099     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   3100       __ lwc1(result, MemOperand(scratch0(), additional_offset));
   3101       __ cvt_d_s(result, result);
    3102     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
   3103       __ ldc1(result, MemOperand(scratch0(), additional_offset));
   3104     }
   3105   } else {
   3106     Register result = ToRegister(instr->result());
   3107     MemOperand mem_operand = PrepareKeyedOperand(
   3108         key, external_pointer, key_is_constant, constant_key,
   3109         element_size_shift, shift_size,
   3110         instr->additional_index(), additional_offset);
   3111     switch (elements_kind) {
   3112       case EXTERNAL_BYTE_ELEMENTS:
   3113         __ lb(result, mem_operand);
   3114         break;
   3115       case EXTERNAL_PIXEL_ELEMENTS:
   3116       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   3117         __ lbu(result, mem_operand);
   3118         break;
   3119       case EXTERNAL_SHORT_ELEMENTS:
   3120         __ lh(result, mem_operand);
   3121         break;
   3122       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   3123         __ lhu(result, mem_operand);
   3124         break;
   3125       case EXTERNAL_INT_ELEMENTS:
   3126         __ lw(result, mem_operand);
   3127         break;
   3128       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   3129         __ lw(result, mem_operand);
   3130         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3131           DeoptimizeIf(Ugreater_equal, instr->environment(),
   3132               result, Operand(0x80000000));
   3133         }
   3134         break;
   3135       case EXTERNAL_FLOAT_ELEMENTS:
   3136       case EXTERNAL_DOUBLE_ELEMENTS:
   3137       case FAST_DOUBLE_ELEMENTS:
   3138       case FAST_ELEMENTS:
   3139       case FAST_SMI_ELEMENTS:
   3140       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3141       case FAST_HOLEY_ELEMENTS:
   3142       case FAST_HOLEY_SMI_ELEMENTS:
   3143       case DICTIONARY_ELEMENTS:
   3144       case NON_STRICT_ARGUMENTS_ELEMENTS:
   3145         UNREACHABLE();
   3146         break;
   3147     }
   3148   }
   3149 }
   3150 
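         // Note on EXTERNAL_UNSIGNED_INT_ELEMENTS above: a loaded value with
         // the top bit set (>= 0x80000000) is not representable as a signed
         // 32-bit integer, so unless the instruction may produce a uint32
         // (HInstruction::kUint32) such loads deoptimize instead of yielding a
         // negative number.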
   3151 
   3152 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   3153   Register elements = ToRegister(instr->elements());
   3154   bool key_is_constant = instr->key()->IsConstantOperand();
   3155   Register key = no_reg;
   3156   DoubleRegister result = ToDoubleRegister(instr->result());
   3157   Register scratch = scratch0();
   3158 
   3159   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   3160 
   3161   int base_offset =
   3162       FixedDoubleArray::kHeaderSize - kHeapObjectTag +
   3163       (instr->additional_index() << element_size_shift);
   3164   if (key_is_constant) {
   3165     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3166     if (constant_key & 0xF0000000) {
   3167       Abort(kArrayIndexConstantValueTooBig);
   3168     }
   3169     base_offset += constant_key << element_size_shift;
   3170   }
   3171   __ Addu(scratch, elements, Operand(base_offset));
   3172 
   3173   if (!key_is_constant) {
   3174     key = ToRegister(instr->key());
   3175     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3176         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3177     __ sll(at, key, shift_size);
   3178     __ Addu(scratch, scratch, at);
   3179   }
   3180 
   3181   __ ldc1(result, MemOperand(scratch));
   3182 
   3183   if (instr->hydrogen()->RequiresHoleCheck()) {
   3184     __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
   3185     DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
   3186   }
   3187 }
   3188 
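         // Holes in a double backing store are a dedicated NaN bit pattern.
         // The check above reloads the upper word of the element, located
         // sizeof(kHoleNanLower32) == 4 bytes past its base on little-endian
         // MIPS, and deoptimizes if it matches kHoleNanUpper32.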
   3189 
   3190 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   3191   Register elements = ToRegister(instr->elements());
   3192   Register result = ToRegister(instr->result());
   3193   Register scratch = scratch0();
   3194   Register store_base = scratch;
   3195   int offset = 0;
   3196 
   3197   if (instr->key()->IsConstantOperand()) {
   3198     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3199     offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
   3200                                            instr->additional_index());
   3201     store_base = elements;
   3202   } else {
   3203     Register key = ToRegister(instr->key());
   3204     // Even though the HLoadKeyed instruction forces the input
   3205     // representation for the key to be an integer, the input gets replaced
   3206     // during bound check elimination with the index argument to the bounds
   3207     // check, which can be tagged, so that case must be handled here, too.
   3208     if (instr->hydrogen()->key()->representation().IsSmi()) {
   3209       __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
   3210       __ addu(scratch, elements, scratch);
   3211     } else {
   3212       __ sll(scratch, key, kPointerSizeLog2);
   3213       __ addu(scratch, elements, scratch);
   3214     }
   3215     offset = FixedArray::OffsetOfElementAt(instr->additional_index());
   3216   }
   3217   __ lw(result, FieldMemOperand(store_base, offset));
   3218 
   3219   // Check for the hole value.
   3220   if (instr->hydrogen()->RequiresHoleCheck()) {
   3221     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3222       __ SmiTst(result, scratch);
   3223       DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   3224     } else {
   3225       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3226       DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
   3227     }
   3228   }
   3229 }
   3230 
   3231 
   3232 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3233   if (instr->is_external()) {
   3234     DoLoadKeyedExternalArray(instr);
   3235   } else if (instr->hydrogen()->representation().IsDouble()) {
   3236     DoLoadKeyedFixedDoubleArray(instr);
   3237   } else {
   3238     DoLoadKeyedFixedArray(instr);
   3239   }
   3240 }
   3241 
   3242 
   3243 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   3244                                          Register base,
   3245                                          bool key_is_constant,
   3246                                          int constant_key,
   3247                                          int element_size,
   3248                                          int shift_size,
   3249                                          int additional_index,
   3250                                          int additional_offset) {
   3251   if (additional_index != 0 && !key_is_constant) {
   3252     additional_index *= 1 << (element_size - shift_size);
   3253     __ Addu(scratch0(), key, Operand(additional_index));
   3254   }
   3255 
   3256   if (key_is_constant) {
   3257     return MemOperand(base,
   3258                       (constant_key << element_size) + additional_offset);
   3259   }
   3260 
   3261   if (additional_index == 0) {
   3262     if (shift_size >= 0) {
   3263       __ sll(scratch0(), key, shift_size);
   3264       __ Addu(scratch0(), base, scratch0());
   3265       return MemOperand(scratch0());
   3266     } else {
   3267       ASSERT_EQ(-1, shift_size);
   3268       __ srl(scratch0(), key, 1);
   3269       __ Addu(scratch0(), base, scratch0());
   3270       return MemOperand(scratch0());
   3271     }
   3272   }
   3273 
   3274   if (shift_size >= 0) {
   3275     __ sll(scratch0(), scratch0(), shift_size);
   3276     __ Addu(scratch0(), base, scratch0());
   3277     return MemOperand(scratch0());
   3278   } else {
   3279     ASSERT_EQ(-1, shift_size);
   3280     __ srl(scratch0(), scratch0(), 1);
   3281     __ Addu(scratch0(), base, scratch0());
   3282     return MemOperand(scratch0());
   3283   }
   3284 }
   3285 
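         // Addressing summary, for reference: a constant key folds entirely
         // into the immediate, (constant_key << element_size) +
         // additional_offset. A register key is scaled by shift_size and added
         // to base; shift_size == -1 means a smi key indexing byte-sized
         // elements, so a single srl by one both untags the smi and applies
         // the scale.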
   3286 
   3287 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3288   ASSERT(ToRegister(instr->context()).is(cp));
   3289   ASSERT(ToRegister(instr->object()).is(a1));
   3290   ASSERT(ToRegister(instr->key()).is(a0));
   3291 
   3292   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
   3293   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3294 }
   3295 
   3296 
   3297 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3298   Register scratch = scratch0();
   3299   Register temp = scratch1();
   3300   Register result = ToRegister(instr->result());
   3301 
   3302   if (instr->hydrogen()->from_inlined()) {
   3303     __ Subu(result, sp, 2 * kPointerSize);
   3304   } else {
   3305     // Check if the calling frame is an arguments adaptor frame.
   3306     Label done, adapted;
   3307     __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3308     __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
   3309     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3310 
    3311     // Result is the frame pointer for the frame if not adapted, and for the real
   3312     // frame below the adaptor frame if adapted.
   3313     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
   3314     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
   3315   }
   3316 }
   3317 
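         // The Movn/Movz pair above forms a branchless select on temp:
         //   result = (temp != 0) ? fp : scratch
         // i.e. the current frame pointer when no adaptor frame is present,
         // otherwise the caller's frame below the adaptor.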
   3318 
   3319 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3320   Register elem = ToRegister(instr->elements());
   3321   Register result = ToRegister(instr->result());
   3322 
   3323   Label done;
   3324 
    3325   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3326   __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
   3327   __ Branch(&done, eq, fp, Operand(elem));
   3328 
   3329   // Arguments adaptor frame present. Get argument length from there.
   3330   __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3331   __ lw(result,
   3332         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3333   __ SmiUntag(result);
   3334 
   3335   // Argument length is in result register.
   3336   __ bind(&done);
   3337 }
   3338 
   3339 
   3340 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3341   Register receiver = ToRegister(instr->receiver());
   3342   Register function = ToRegister(instr->function());
   3343   Register result = ToRegister(instr->result());
   3344   Register scratch = scratch0();
   3345 
   3346   // If the receiver is null or undefined, we have to pass the global
   3347   // object as a receiver to normal functions. Values have to be
   3348   // passed unchanged to builtins and strict-mode functions.
   3349   Label global_object, result_in_receiver;
   3350 
   3351   // Do not transform the receiver to object for strict mode
   3352   // functions.
   3353   __ lw(scratch,
   3354          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3355   __ lw(scratch,
   3356          FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3357 
   3358   // Do not transform the receiver to object for builtins.
   3359   int32_t strict_mode_function_mask =
    3360                   1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
   3361   int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
   3362   __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
   3363   __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
   3364 
   3365   // Normal function. Replace undefined or null with global receiver.
   3366   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3367   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3368   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3369   __ Branch(&global_object, eq, receiver, Operand(scratch));
   3370 
   3371   // Deoptimize if the receiver is not a JS object.
   3372   __ SmiTst(receiver, scratch);
   3373   DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
   3374 
   3375   __ GetObjectType(receiver, scratch, scratch);
   3376   DeoptimizeIf(lt, instr->environment(),
   3377                scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
   3378   __ Branch(&result_in_receiver);
   3379 
   3380   __ bind(&global_object);
   3381 
   3382   __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3383   __ lw(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
   3384   __ lw(result,
   3385          FieldMemOperand(result, JSGlobalObject::kGlobalReceiverOffset));
   3386   if (result.is(receiver)) {
   3387     __ bind(&result_in_receiver);
   3388   } else {
   3389     Label result_ok;
   3390     __ Branch(&result_ok);
   3391     __ bind(&result_in_receiver);
   3392     __ mov(result, receiver);
   3393     __ bind(&result_ok);
   3394   }
   3395 }
   3396 
   3397 
   3398 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3399   Register receiver = ToRegister(instr->receiver());
   3400   Register function = ToRegister(instr->function());
   3401   Register length = ToRegister(instr->length());
   3402   Register elements = ToRegister(instr->elements());
   3403   Register scratch = scratch0();
   3404   ASSERT(receiver.is(a0));  // Used for parameter count.
   3405   ASSERT(function.is(a1));  // Required by InvokeFunction.
   3406   ASSERT(ToRegister(instr->result()).is(v0));
   3407 
   3408   // Copy the arguments to this function possibly from the
   3409   // adaptor frame below it.
   3410   const uint32_t kArgumentsLimit = 1 * KB;
   3411   DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
   3412 
   3413   // Push the receiver and use the register to keep the original
   3414   // number of arguments.
   3415   __ push(receiver);
   3416   __ Move(receiver, length);
    3417   // The arguments are located one pointer size past elements.
   3418   __ Addu(elements, elements, Operand(1 * kPointerSize));
   3419 
   3420   // Loop through the arguments pushing them onto the execution
   3421   // stack.
   3422   Label invoke, loop;
   3423   // length is a small non-negative integer, due to the test above.
   3424   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
   3425   __ sll(scratch, length, 2);
   3426   __ bind(&loop);
   3427   __ Addu(scratch, elements, scratch);
   3428   __ lw(scratch, MemOperand(scratch));
   3429   __ push(scratch);
   3430   __ Subu(length, length, Operand(1));
   3431   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
   3432   __ sll(scratch, length, 2);
   3433 
   3434   __ bind(&invoke);
   3435   ASSERT(instr->HasPointerMap());
   3436   LPointerMap* pointers = instr->pointer_map();
   3437   SafepointGenerator safepoint_generator(
   3438       this, pointers, Safepoint::kLazyDeopt);
    3439   // The number of arguments is stored in receiver, which is a0, as expected
   3440   // by InvokeFunction.
   3441   ParameterCount actual(receiver);
   3442   __ InvokeFunction(function, actual, CALL_FUNCTION,
   3443                     safepoint_generator, CALL_AS_METHOD);
   3444 }
   3445 
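         // The copy loop above keeps scratch == length * kPointerSize by
         // recomputing the sll in each branch delay slot, so the shift runs
         // both on entry and on every back edge. Each iteration pushes the
         // argument at the current length and decrements length, copying the
         // arguments from last to first until length reaches zero.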
   3446 
   3447 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3448   LOperand* argument = instr->value();
   3449   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3450     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3451   } else {
   3452     Register argument_reg = EmitLoadRegister(argument, at);
   3453     __ push(argument_reg);
   3454   }
   3455 }
   3456 
   3457 
   3458 void LCodeGen::DoDrop(LDrop* instr) {
   3459   __ Drop(instr->count());
   3460 }
   3461 
   3462 
   3463 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3464   Register result = ToRegister(instr->result());
   3465   __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3466 }
   3467 
   3468 
   3469 void LCodeGen::DoContext(LContext* instr) {
   3470   // If there is a non-return use, the context must be moved to a register.
   3471   Register result = ToRegister(instr->result());
   3472   if (info()->IsOptimizing()) {
   3473     __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3474   } else {
   3475     // If there is no frame, the context must be in cp.
   3476     ASSERT(result.is(cp));
   3477   }
   3478 }
   3479 
   3480 
   3481 void LCodeGen::DoOuterContext(LOuterContext* instr) {
   3482   Register context = ToRegister(instr->context());
   3483   Register result = ToRegister(instr->result());
   3484   __ lw(result,
   3485         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   3486 }
   3487 
   3488 
   3489 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3490   ASSERT(ToRegister(instr->context()).is(cp));
   3491   __ li(scratch0(), instr->hydrogen()->pairs());
   3492   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   3493   // The context is the first argument.
   3494   __ Push(cp, scratch0(), scratch1());
   3495   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
   3496 }
   3497 
   3498 
   3499 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   3500   Register context = ToRegister(instr->context());
   3501   Register result = ToRegister(instr->result());
   3502   __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
   3503 }
   3504 
   3505 
   3506 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
   3507   Register global = ToRegister(instr->global_object());
   3508   Register result = ToRegister(instr->result());
   3509   __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
   3510 }
   3511 
   3512 
   3513 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3514                                  int formal_parameter_count,
   3515                                  int arity,
   3516                                  LInstruction* instr,
   3517                                  CallKind call_kind,
   3518                                  A1State a1_state) {
   3519   bool dont_adapt_arguments =
   3520       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3521   bool can_invoke_directly =
   3522       dont_adapt_arguments || formal_parameter_count == arity;
   3523 
   3524   LPointerMap* pointers = instr->pointer_map();
   3525 
   3526   if (can_invoke_directly) {
   3527     if (a1_state == A1_UNINITIALIZED) {
   3528       __ li(a1, function);
   3529     }
   3530 
   3531     // Change context.
   3532     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   3533 
    3534     // Set a0 to the arguments count if adaptation is not needed. Assumes that
    3535     // a0 is available to write to at this point.
   3536     if (dont_adapt_arguments) {
   3537       __ li(a0, Operand(arity));
   3538     }
   3539 
   3540     // Invoke function.
   3541     __ SetCallKind(t1, call_kind);
   3542     __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
   3543     __ Call(at);
   3544 
   3545     // Set up deoptimization.
   3546     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3547   } else {
   3548     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3549     ParameterCount count(arity);
   3550     ParameterCount expected(formal_parameter_count);
   3551     __ InvokeFunction(
   3552         function, expected, count, CALL_FUNCTION, generator, call_kind);
   3553   }
   3554 }
   3555 
   3556 
   3557 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   3558   ASSERT(ToRegister(instr->result()).is(v0));
   3559   __ mov(a0, v0);
   3560   CallKnownFunction(instr->hydrogen()->function(),
   3561                     instr->hydrogen()->formal_parameter_count(),
   3562                     instr->arity(),
   3563                     instr,
   3564                     CALL_AS_METHOD,
   3565                     A1_UNINITIALIZED);
   3566 }
   3567 
   3568 
   3569 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3570   ASSERT(instr->context() != NULL);
   3571   ASSERT(ToRegister(instr->context()).is(cp));
   3572   Register input = ToRegister(instr->value());
   3573   Register result = ToRegister(instr->result());
   3574   Register scratch = scratch0();
   3575 
   3576   // Deoptimize if not a heap number.
   3577   __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3578   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   3579   DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
   3580 
   3581   Label done;
   3582   Register exponent = scratch0();
   3583   scratch = no_reg;
   3584   __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3585   // Check the sign of the argument. If the argument is positive, just
   3586   // return it.
   3587   __ Move(result, input);
   3588   __ And(at, exponent, Operand(HeapNumber::kSignMask));
   3589   __ Branch(&done, eq, at, Operand(zero_reg));
   3590 
   3591   // Input is negative. Reverse its sign.
   3592   // Preserve the value of all registers.
   3593   {
   3594     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   3595 
   3596     // Registers were saved at the safepoint, so we can use
   3597     // many scratch registers.
   3598     Register tmp1 = input.is(a1) ? a0 : a1;
   3599     Register tmp2 = input.is(a2) ? a0 : a2;
   3600     Register tmp3 = input.is(a3) ? a0 : a3;
   3601     Register tmp4 = input.is(t0) ? a0 : t0;
   3602 
   3603     // exponent: floating point exponent value.
   3604 
   3605     Label allocated, slow;
   3606     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3607     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3608     __ Branch(&allocated);
   3609 
   3610     // Slow case: Call the runtime system to do the number allocation.
   3611     __ bind(&slow);
   3612 
   3613     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3614                             instr->context());
    3615     // Set the pointer to the new heap number in tmp1.
   3616     if (!tmp1.is(v0))
   3617       __ mov(tmp1, v0);
   3618     // Restore input_reg after call to runtime.
   3619     __ LoadFromSafepointRegisterSlot(input, input);
   3620     __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3621 
   3622     __ bind(&allocated);
   3623     // exponent: floating point exponent value.
   3624     // tmp1: allocated heap number.
   3625     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
   3626     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3627     __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3628     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3629 
   3630     __ StoreToSafepointRegisterSlot(tmp1, result);
   3631   }
   3632 
   3633   __ bind(&done);
   3634 }
   3635 
   3636 
   3637 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3638   Register input = ToRegister(instr->value());
   3639   Register result = ToRegister(instr->result());
   3640   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   3641   Label done;
   3642   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
   3643   __ mov(result, input);
   3644   __ subu(result, zero_reg, input);
   3645   // Overflow if result is still negative, i.e. 0x80000000.
   3646   DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
   3647   __ bind(&done);
   3648 }
   3649 
   3650 
   3651 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3652   // Class for deferred case.
   3653   class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   3654    public:
   3655     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3656         : LDeferredCode(codegen), instr_(instr) { }
   3657     virtual void Generate() V8_OVERRIDE {
   3658       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3659     }
   3660     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   3661    private:
   3662     LMathAbs* instr_;
   3663   };
   3664 
   3665   Representation r = instr->hydrogen()->value()->representation();
   3666   if (r.IsDouble()) {
   3667     FPURegister input = ToDoubleRegister(instr->value());
   3668     FPURegister result = ToDoubleRegister(instr->result());
   3669     __ abs_d(result, input);
   3670   } else if (r.IsSmiOrInteger32()) {
   3671     EmitIntegerMathAbs(instr);
   3672   } else {
   3673     // Representation is tagged.
   3674     DeferredMathAbsTaggedHeapNumber* deferred =
   3675         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3676     Register input = ToRegister(instr->value());
   3677     // Smi check.
   3678     __ JumpIfNotSmi(input, deferred->entry());
   3679     // If smi, handle it directly.
   3680     EmitIntegerMathAbs(instr);
   3681     __ bind(deferred->exit());
   3682   }
   3683 }
   3684 
   3685 
   3686 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3687   DoubleRegister input = ToDoubleRegister(instr->value());
   3688   Register result = ToRegister(instr->result());
   3689   Register scratch1 = scratch0();
   3690   Register except_flag = ToRegister(instr->temp());
   3691 
   3692   __ EmitFPUTruncate(kRoundToMinusInf,
   3693                      result,
   3694                      input,
   3695                      scratch1,
   3696                      double_scratch0(),
   3697                      except_flag);
   3698 
   3699   // Deopt if the operation did not succeed.
   3700   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   3701 
   3702   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3703     // Test for -0.
   3704     Label done;
   3705     __ Branch(&done, ne, result, Operand(zero_reg));
   3706     __ mfc1(scratch1, input.high());
   3707     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   3708     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   3709     __ bind(&done);
   3710   }
   3711 }
   3712 
   3713 
   3714 void LCodeGen::DoMathRound(LMathRound* instr) {
   3715   DoubleRegister input = ToDoubleRegister(instr->value());
   3716   Register result = ToRegister(instr->result());
   3717   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3718   Register scratch = scratch0();
   3719   Label done, check_sign_on_zero;
   3720 
   3721   // Extract exponent bits.
   3722   __ mfc1(result, input.high());
   3723   __ Ext(scratch,
   3724          result,
   3725          HeapNumber::kExponentShift,
   3726          HeapNumber::kExponentBits);
   3727 
   3728   // If the number is in ]-0.5, +0.5[, the result is +/- 0.
   3729   Label skip1;
   3730   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
   3731   __ mov(result, zero_reg);
   3732   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3733     __ Branch(&check_sign_on_zero);
   3734   } else {
   3735     __ Branch(&done);
   3736   }
   3737   __ bind(&skip1);
   3738 
   3739   // The following conversion will not work with numbers
   3740   // outside of ]-2^32, 2^32[.
   3741   DeoptimizeIf(ge, instr->environment(), scratch,
   3742                Operand(HeapNumber::kExponentBias + 32));
   3743 
   3744   // Save the original sign for later comparison.
   3745   __ And(scratch, result, Operand(HeapNumber::kSignMask));
   3746 
   3747   __ Move(double_scratch0(), 0.5);
   3748   __ add_d(double_scratch0(), input, double_scratch0());
   3749 
   3750   // Check sign of the result: if the sign changed, the input
    3751   // value was in ]-0.5, 0[ and the result should be -0.
   3752   __ mfc1(result, double_scratch0().high());
   3753   __ Xor(result, result, Operand(scratch));
   3754   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    3755     // ARM uses 'mi' here, which is 'lt' on MIPS.
   3756     DeoptimizeIf(lt, instr->environment(), result,
   3757                  Operand(zero_reg));
   3758   } else {
   3759     Label skip2;
    3760     // ARM uses 'mi' here, which is 'lt' on MIPS;
    3761     // negating it results in 'ge'.
   3762     __ Branch(&skip2, ge, result, Operand(zero_reg));
   3763     __ mov(result, zero_reg);
   3764     __ Branch(&done);
   3765     __ bind(&skip2);
   3766   }
   3767 
   3768   Register except_flag = scratch;
   3769   __ EmitFPUTruncate(kRoundToMinusInf,
   3770                      result,
   3771                      double_scratch0(),
   3772                      at,
   3773                      double_scratch1,
   3774                      except_flag);
   3775 
   3776   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   3777 
   3778   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3779     // Test for -0.
   3780     __ Branch(&done, ne, result, Operand(zero_reg));
   3781     __ bind(&check_sign_on_zero);
   3782     __ mfc1(scratch, input.high());
   3783     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
   3784     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   3785   }
   3786   __ bind(&done);
   3787 }
   3788 
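         // Rounding strategy above, in outline:
         //   1. An exponent below kExponentBias - 2 means |x| < 0.5, so the
         //      result is +/-0 (with a -0 bailout when required).
         //   2. Otherwise x + 0.5 is computed and truncated toward -infinity.
         //   3. A sign flip between x and x + 0.5 means x was in ]-0.5, 0[,
         //      which must produce -0 (bailout) or +0.
         // E.g. 2.5 -> 3.0 -> 3 and -2.5 -> -2.0 -> -2, matching JavaScript's
         // Math.round ties-toward-+infinity behavior.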
   3789 
   3790 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3791   DoubleRegister input = ToDoubleRegister(instr->value());
   3792   DoubleRegister result = ToDoubleRegister(instr->result());
   3793   __ sqrt_d(result, input);
   3794 }
   3795 
   3796 
   3797 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3798   DoubleRegister input = ToDoubleRegister(instr->value());
   3799   DoubleRegister result = ToDoubleRegister(instr->result());
   3800   DoubleRegister temp = ToDoubleRegister(instr->temp());
   3801 
   3802   ASSERT(!input.is(result));
   3803 
   3804   // Note that according to ECMA-262 15.8.2.13:
   3805   // Math.pow(-Infinity, 0.5) == Infinity
   3806   // Math.sqrt(-Infinity) == NaN
   3807   Label done;
   3808   __ Move(temp, -V8_INFINITY);
   3809   __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
   3810   // Set up Infinity in the delay slot.
   3811   // result is overwritten if the branch is not taken.
   3812   __ neg_d(result, temp);
   3813 
   3814   // Add +0 to convert -0 to +0.
   3815   __ add_d(result, input, kDoubleRegZero);
   3816   __ sqrt_d(result, result);
   3817   __ bind(&done);
   3818 }
   3819 
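         // Per ECMA-262 15.8.2.13, Math.pow(-Infinity, 0.5) is +Infinity even
         // though sqrt(-Infinity) is NaN, hence the explicit test against
         // -V8_INFINITY with +Infinity materialized in the branch delay slot.
         // Adding +0 before the sqrt maps a -0 input to +0, so
         // Math.pow(-0, 0.5) correctly returns +0.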
   3820 
   3821 void LCodeGen::DoPower(LPower* instr) {
   3822   Representation exponent_type = instr->hydrogen()->right()->representation();
   3823   // Having marked this as a call, we can use any registers.
   3824   // Just make sure that the input/output registers are the expected ones.
   3825   ASSERT(!instr->right()->IsDoubleRegister() ||
   3826          ToDoubleRegister(instr->right()).is(f4));
   3827   ASSERT(!instr->right()->IsRegister() ||
   3828          ToRegister(instr->right()).is(a2));
   3829   ASSERT(ToDoubleRegister(instr->left()).is(f2));
   3830   ASSERT(ToDoubleRegister(instr->result()).is(f0));
   3831 
   3832   if (exponent_type.IsSmi()) {
   3833     MathPowStub stub(MathPowStub::TAGGED);
   3834     __ CallStub(&stub);
   3835   } else if (exponent_type.IsTagged()) {
   3836     Label no_deopt;
   3837     __ JumpIfSmi(a2, &no_deopt);
    3838     __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
             __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);  // Expected map.
    3839     DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
   3840     __ bind(&no_deopt);
   3841     MathPowStub stub(MathPowStub::TAGGED);
   3842     __ CallStub(&stub);
   3843   } else if (exponent_type.IsInteger32()) {
   3844     MathPowStub stub(MathPowStub::INTEGER);
   3845     __ CallStub(&stub);
   3846   } else {
   3847     ASSERT(exponent_type.IsDouble());
   3848     MathPowStub stub(MathPowStub::DOUBLE);
   3849     __ CallStub(&stub);
   3850   }
   3851 }
   3852 
   3853 
   3854 void LCodeGen::DoMathExp(LMathExp* instr) {
   3855   DoubleRegister input = ToDoubleRegister(instr->value());
   3856   DoubleRegister result = ToDoubleRegister(instr->result());
   3857   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
   3858   DoubleRegister double_scratch2 = double_scratch0();
   3859   Register temp1 = ToRegister(instr->temp1());
   3860   Register temp2 = ToRegister(instr->temp2());
   3861 
   3862   MathExpGenerator::EmitMathExp(
   3863       masm(), input, result, double_scratch1, double_scratch2,
   3864       temp1, temp2, scratch0());
   3865 }
   3866 
   3867 
   3868 void LCodeGen::DoMathLog(LMathLog* instr) {
   3869   ASSERT(ToDoubleRegister(instr->result()).is(f4));
   3870   // Set the context register to a GC-safe fake value. Clobbering it is
   3871   // OK because this instruction is marked as a call.
   3872   __ mov(cp, zero_reg);
   3873   TranscendentalCacheStub stub(TranscendentalCache::LOG,
   3874                                TranscendentalCacheStub::UNTAGGED);
   3875   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3876 }
   3877 
   3878 
   3879 void LCodeGen::DoMathTan(LMathTan* instr) {
   3880   ASSERT(ToDoubleRegister(instr->result()).is(f4));
   3881   // Set the context register to a GC-safe fake value. Clobbering it is
   3882   // OK because this instruction is marked as a call.
   3883   __ mov(cp, zero_reg);
   3884   TranscendentalCacheStub stub(TranscendentalCache::TAN,
   3885                                TranscendentalCacheStub::UNTAGGED);
   3886   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3887 }
   3888 
   3889 
   3890 void LCodeGen::DoMathCos(LMathCos* instr) {
   3891   ASSERT(ToDoubleRegister(instr->result()).is(f4));
   3892   // Set the context register to a GC-safe fake value. Clobbering it is
   3893   // OK because this instruction is marked as a call.
   3894   __ mov(cp, zero_reg);
   3895   TranscendentalCacheStub stub(TranscendentalCache::COS,
   3896                                TranscendentalCacheStub::UNTAGGED);
   3897   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3898 }
   3899 
   3900 
   3901 void LCodeGen::DoMathSin(LMathSin* instr) {
   3902   ASSERT(ToDoubleRegister(instr->result()).is(f4));
   3903   // Set the context register to a GC-safe fake value. Clobbering it is
   3904   // OK because this instruction is marked as a call.
   3905   __ mov(cp, zero_reg);
   3906   TranscendentalCacheStub stub(TranscendentalCache::SIN,
   3907                                TranscendentalCacheStub::UNTAGGED);
   3908   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3909 }
   3910 
   3911 
   3912 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3913   ASSERT(ToRegister(instr->context()).is(cp));
   3914   ASSERT(ToRegister(instr->function()).is(a1));
   3915   ASSERT(instr->HasPointerMap());
   3916 
   3917   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3918   if (known_function.is_null()) {
   3919     LPointerMap* pointers = instr->pointer_map();
   3920     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3921     ParameterCount count(instr->arity());
   3922     __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
   3923   } else {
   3924     CallKnownFunction(known_function,
   3925                       instr->hydrogen()->formal_parameter_count(),
   3926                       instr->arity(),
   3927                       instr,
   3928                       CALL_AS_METHOD,
   3929                       A1_CONTAINS_TARGET);
   3930   }
   3931 }
   3932 
   3933 
   3934 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   3935   ASSERT(ToRegister(instr->context()).is(cp));
   3936   ASSERT(ToRegister(instr->result()).is(v0));
   3937 
   3938   int arity = instr->arity();
   3939   Handle<Code> ic =
   3940       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   3941   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3942 }
   3943 
   3944 
   3945 void LCodeGen::DoCallNamed(LCallNamed* instr) {
   3946   ASSERT(ToRegister(instr->context()).is(cp));
   3947   ASSERT(ToRegister(instr->result()).is(v0));
   3948 
   3949   int arity = instr->arity();
   3950   RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
   3951   Handle<Code> ic =
   3952       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   3953   __ li(a2, Operand(instr->name()));
   3954   CallCode(ic, mode, instr);
   3955 }
   3956 
   3957 
   3958 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3959   ASSERT(ToRegister(instr->context()).is(cp));
   3960   ASSERT(ToRegister(instr->function()).is(a1));
   3961   ASSERT(ToRegister(instr->result()).is(v0));
   3962 
   3963   int arity = instr->arity();
   3964   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   3965   if (instr->hydrogen()->IsTailCall()) {
   3966     if (NeedsEagerFrame()) __ mov(sp, fp);
   3967     __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
   3968   } else {
   3969     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3970   }
   3971 }
   3972 
   3973 
   3974 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
   3975   ASSERT(ToRegister(instr->context()).is(cp));
   3976   ASSERT(ToRegister(instr->result()).is(v0));
   3977 
   3978   int arity = instr->arity();
   3979   RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
   3980   Handle<Code> ic =
   3981       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   3982   __ li(a2, Operand(instr->name()));
   3983   CallCode(ic, mode, instr);
   3984 }
   3985 
   3986 
   3987 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   3988   ASSERT(ToRegister(instr->result()).is(v0));
   3989   CallKnownFunction(instr->hydrogen()->target(),
   3990                     instr->hydrogen()->formal_parameter_count(),
   3991                     instr->arity(),
   3992                     instr,
   3993                     CALL_AS_FUNCTION,
   3994                     A1_UNINITIALIZED);
   3995 }
   3996 
   3997 
   3998 void LCodeGen::DoCallNew(LCallNew* instr) {
   3999   ASSERT(ToRegister(instr->context()).is(cp));
   4000   ASSERT(ToRegister(instr->constructor()).is(a1));
   4001   ASSERT(ToRegister(instr->result()).is(v0));
   4002 
   4003   __ li(a0, Operand(instr->arity()));
    4004   // No cell in a2 for construct type feedback in optimized code.
   4005   Handle<Object> undefined_value(isolate()->factory()->undefined_value());
   4006   __ li(a2, Operand(undefined_value));
   4007   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   4008   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4009 }
   4010 
   4011 
   4012 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   4013   ASSERT(ToRegister(instr->context()).is(cp));
   4014   ASSERT(ToRegister(instr->constructor()).is(a1));
   4015   ASSERT(ToRegister(instr->result()).is(v0));
   4016 
   4017   __ li(a0, Operand(instr->arity()));
   4018   __ li(a2, Operand(instr->hydrogen()->property_cell()));
   4019   ElementsKind kind = instr->hydrogen()->elements_kind();
   4020   AllocationSiteOverrideMode override_mode =
   4021       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   4022           ? DISABLE_ALLOCATION_SITES
   4023           : DONT_OVERRIDE;
   4024   ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
   4025 
   4026   if (instr->arity() == 0) {
   4027     ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
   4028     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4029   } else if (instr->arity() == 1) {
   4030     Label done;
   4031     if (IsFastPackedElementsKind(kind)) {
   4032       Label packed_case;
    4033       // If the single (length) argument is zero, the packed elements
    4034       // kind is safe; otherwise the array must be allocated holey.
   4035       __ lw(t1, MemOperand(sp, 0));
   4036       __ Branch(&packed_case, eq, t1, Operand(zero_reg));
   4037 
   4038       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   4039       ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
   4040                                               override_mode);
   4041       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4042       __ jmp(&done);
   4043       __ bind(&packed_case);
   4044     }
   4045 
   4046     ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
   4047     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4048     __ bind(&done);
   4049   } else {
   4050     ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
   4051     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4052   }
   4053 }
   4054 
   4055 
   4056 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   4057   CallRuntime(instr->function(), instr->arity(), instr);
   4058 }
   4059 
   4060 
   4061 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4062   Register function = ToRegister(instr->function());
   4063   Register code_object = ToRegister(instr->code_object());
   4064   __ Addu(code_object, code_object,
   4065           Operand(Code::kHeaderSize - kHeapObjectTag));
   4066   __ sw(code_object,
   4067         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   4068 }
   4069 
   4070 
   4071 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   4072   Register result = ToRegister(instr->result());
   4073   Register base = ToRegister(instr->base_object());
   4074   if (instr->offset()->IsConstantOperand()) {
   4075     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   4076     __ Addu(result, base, Operand(ToInteger32(offset)));
   4077   } else {
   4078     Register offset = ToRegister(instr->offset());
   4079     __ Addu(result, base, offset);
   4080   }
   4081 }
   4082 
   4083 
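         // A rough map of the store paths chosen below (editorial summary of the
         // code that follows, not part of the original source):
         //   - external memory: a raw store at object+offset, no write barrier;
         //   - double fields (with field tracking on): an unboxed sdc1 store;
         //   - everything else: an optional map transition, then a tagged store
         //     either into the object itself or into its out-of-line properties
         //     array, each followed by a write barrier when the value may be a
         //     heap object.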
   4084 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4085   Representation representation = instr->representation();
   4086 
   4087   Register object = ToRegister(instr->object());
   4088   Register scratch = scratch0();
   4089   HObjectAccess access = instr->hydrogen()->access();
   4090   int offset = access.offset();
   4091 
   4092   if (access.IsExternalMemory()) {
   4093     Register value = ToRegister(instr->value());
   4094     MemOperand operand = MemOperand(object, offset);
   4095     __ Store(value, operand, representation);
   4096     return;
   4097   }
   4098 
   4099   Handle<Map> transition = instr->transition();
   4100 
   4101   if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
   4102     Register value = ToRegister(instr->value());
   4103     if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4104       __ SmiTst(value, scratch);
   4105       DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
   4106     }
   4107   } else if (FLAG_track_double_fields && representation.IsDouble()) {
   4108     ASSERT(transition.is_null());
   4109     ASSERT(access.IsInobject());
   4110     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4111     DoubleRegister value = ToDoubleRegister(instr->value());
   4112     __ sdc1(value, FieldMemOperand(object, offset));
   4113     return;
   4114   }
   4115 
   4116   if (!transition.is_null()) {
   4117     __ li(scratch, Operand(transition));
   4118     __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   4119     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   4120       Register temp = ToRegister(instr->temp());
   4121       // Update the write barrier for the map field.
   4122       __ RecordWriteField(object,
   4123                           HeapObject::kMapOffset,
   4124                           scratch,
   4125                           temp,
   4126                           GetRAState(),
   4127                           kSaveFPRegs,
   4128                           OMIT_REMEMBERED_SET,
   4129                           OMIT_SMI_CHECK);
   4130     }
   4131   }
   4132 
   4133   // Do the store.
   4134   Register value = ToRegister(instr->value());
   4135   ASSERT(!object.is(value));
   4136   SmiCheck check_needed =
   4137       instr->hydrogen()->value()->IsHeapObject()
   4138           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4139   if (access.IsInobject()) {
   4140     MemOperand operand = FieldMemOperand(object, offset);
   4141     __ Store(value, operand, representation);
   4142     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4143       // Update the write barrier for the object for in-object properties.
   4144       __ RecordWriteField(object,
   4145                           offset,
   4146                           value,
   4147                           scratch,
   4148                           GetRAState(),
   4149                           kSaveFPRegs,
   4150                           EMIT_REMEMBERED_SET,
   4151                           check_needed);
   4152     }
   4153   } else {
   4154     __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   4155     MemOperand operand = FieldMemOperand(scratch, offset);
   4156     __ Store(value, operand, representation);
   4157     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4158       // Update the write barrier for the properties array.
   4159       // object is used as a scratch register.
   4160       __ RecordWriteField(scratch,
   4161                           offset,
   4162                           value,
   4163                           object,
   4164                           GetRAState(),
   4165                           kSaveFPRegs,
   4166                           EMIT_REMEMBERED_SET,
   4167                           check_needed);
   4168     }
   4169   }
   4170 }
   4171 
   4172 
   4173 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4174   ASSERT(ToRegister(instr->context()).is(cp));
   4175   ASSERT(ToRegister(instr->object()).is(a1));
   4176   ASSERT(ToRegister(instr->value()).is(a0));
   4177 
   4178   // Name is always in a2.
   4179   __ li(a2, Operand(instr->name()));
   4180   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   4181       ? isolate()->builtins()->StoreIC_Initialize_Strict()
   4182       : isolate()->builtins()->StoreIC_Initialize();
   4183   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4184 }
   4185 
   4186 
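         // ApplyCheckIf (below) either deoptimizes on a failed bounds check or,
         // when the check was statically eliminated and --debug-code is on,
         // re-emits the check as an assertion: a failure then stops execution
         // with "eliminated bounds check failed" instead of deoptimizing, which
         // makes bugs in bounds check elimination visible during testing.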
   4187 void LCodeGen::ApplyCheckIf(Condition condition,
   4188                             LBoundsCheck* check,
   4189                             Register src1,
   4190                             const Operand& src2) {
   4191   if (FLAG_debug_code && check->hydrogen()->skip_check()) {
   4192     Label done;
   4193     __ Branch(&done, NegateCondition(condition), src1, src2);
   4194     __ stop("eliminated bounds check failed");
   4195     __ bind(&done);
   4196   } else {
   4197     DeoptimizeIf(condition, check->environment(), src1, src2);
   4198   }
   4199 }
   4200 
   4201 
   4202 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   4203   if (instr->hydrogen()->skip_check()) return;
   4204 
   4205   Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
   4206   if (instr->index()->IsConstantOperand()) {
   4207     int constant_index =
   4208         ToInteger32(LConstantOperand::cast(instr->index()));
   4209     if (instr->hydrogen()->length()->representation().IsSmi()) {
   4210       __ li(at, Operand(Smi::FromInt(constant_index)));
   4211     } else {
   4212       __ li(at, Operand(constant_index));
   4213     }
   4214     ApplyCheckIf(condition,
   4215                  instr,
   4216                  at,
   4217                  Operand(ToRegister(instr->length())));
   4218   } else {
   4219     ApplyCheckIf(condition,
   4220                  instr,
   4221                  ToRegister(instr->index()),
   4222                  Operand(ToRegister(instr->length())));
   4223   }
   4224 }
   4225 
   4226 
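         // Keyed external-array stores address the element roughly as
         //   base + (key << shift_size) + (additional_index << element_size_shift).
         // Illustrative worked example (EXTERNAL_INT_ELEMENTS on 32-bit MIPS):
         //   element_size_shift = 2            (4-byte elements)
         //   smi key            -> shift_size = 2 - kSmiTagSize = 1, since a smi
         //                         already carries the value shifted left by one
         //   additional_index 3 -> additional_offset = 3 << 2 = 12 bytes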
   4227 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4228   Register external_pointer = ToRegister(instr->elements());
   4229   Register key = no_reg;
   4230   ElementsKind elements_kind = instr->elements_kind();
   4231   bool key_is_constant = instr->key()->IsConstantOperand();
   4232   int constant_key = 0;
   4233   if (key_is_constant) {
   4234     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4235     if (constant_key & 0xF0000000) {
   4236       Abort(kArrayIndexConstantValueTooBig);
   4237     }
   4238   } else {
   4239     key = ToRegister(instr->key());
   4240   }
   4241   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   4242   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4243       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4244   int additional_offset = instr->additional_index() << element_size_shift;
   4245 
   4246   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
   4247       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   4248     Register address = scratch0();
   4249     FPURegister value(ToDoubleRegister(instr->value()));
   4250     if (key_is_constant) {
   4251       if (constant_key != 0) {
   4252         __ Addu(address, external_pointer,
   4253                 Operand(constant_key << element_size_shift));
   4254       } else {
   4255         address = external_pointer;
   4256       }
   4257     } else {
   4258       __ sll(address, key, shift_size);
   4259       __ Addu(address, external_pointer, address);
   4260     }
   4261 
   4262     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   4263       __ cvt_s_d(double_scratch0(), value);
   4264       __ swc1(double_scratch0(), MemOperand(address, additional_offset));
   4265     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
   4266       __ sdc1(value, MemOperand(address, additional_offset));
   4267     }
   4268   } else {
   4269     Register value(ToRegister(instr->value()));
   4270     MemOperand mem_operand = PrepareKeyedOperand(
   4271         key, external_pointer, key_is_constant, constant_key,
   4272         element_size_shift, shift_size,
   4273         instr->additional_index(), additional_offset);
   4274     switch (elements_kind) {
   4275       case EXTERNAL_PIXEL_ELEMENTS:
   4276       case EXTERNAL_BYTE_ELEMENTS:
   4277       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   4278         __ sb(value, mem_operand);
   4279         break;
   4280       case EXTERNAL_SHORT_ELEMENTS:
   4281       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   4282         __ sh(value, mem_operand);
   4283         break;
   4284       case EXTERNAL_INT_ELEMENTS:
   4285       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   4286         __ sw(value, mem_operand);
   4287         break;
   4288       case EXTERNAL_FLOAT_ELEMENTS:
   4289       case EXTERNAL_DOUBLE_ELEMENTS:
   4290       case FAST_DOUBLE_ELEMENTS:
   4291       case FAST_ELEMENTS:
   4292       case FAST_SMI_ELEMENTS:
   4293       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4294       case FAST_HOLEY_ELEMENTS:
   4295       case FAST_HOLEY_SMI_ELEMENTS:
   4296       case DICTIONARY_ELEMENTS:
   4297       case NON_STRICT_ARGUMENTS_ELEMENTS:
   4298         UNREACHABLE();
   4299         break;
   4300     }
   4301   }
   4302 }
   4303 
   4304 
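         // Stores into FixedDoubleArrays canonicalize NaNs: the hole is encoded
         // as one particular NaN bit pattern, so letting arbitrary NaN payloads
         // through could make a stored value indistinguishable from a hole. Any
         // NaN is therefore replaced by the canonical non-hole NaN before the
         // sdc1 below.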
   4305 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4306   DoubleRegister value = ToDoubleRegister(instr->value());
   4307   Register elements = ToRegister(instr->elements());
   4308   Register scratch = scratch0();
   4309   DoubleRegister double_scratch = double_scratch0();
   4310   bool key_is_constant = instr->key()->IsConstantOperand();
   4311   Label not_nan, done;
   4312 
   4313   // Calculate the effective address of the slot in the array to store the
   4314   // double value.
   4315   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4316   if (key_is_constant) {
   4317     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4318     if (constant_key & 0xF0000000) {
   4319       Abort(kArrayIndexConstantValueTooBig);
   4320     }
   4321     __ Addu(scratch, elements,
   4322             Operand((constant_key << element_size_shift) +
   4323                     FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   4324   } else {
   4325     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4326         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4327     __ Addu(scratch, elements,
   4328             Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   4329     __ sll(at, ToRegister(instr->key()), shift_size);
   4330     __ Addu(scratch, scratch, at);
   4331   }
   4332 
   4333   if (instr->NeedsCanonicalization()) {
   4334     Label is_nan;
   4335     // Check for NaN. All NaNs must be canonicalized.
   4336     __ BranchF(NULL, &is_nan, eq, value, value);
   4337     __ Branch(&not_nan);
   4338 
    4339     // Only load the canonical NaN if the comparison above was unordered (NaN).
   4340     __ bind(&is_nan);
   4341     __ Move(double_scratch,
   4342             FixedDoubleArray::canonical_not_the_hole_nan_as_double());
   4343     __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
   4344         element_size_shift));
   4345     __ Branch(&done);
   4346   }
   4347 
   4348   __ bind(&not_nan);
   4349   __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
   4350       element_size_shift));
   4351   __ bind(&done);
   4352 }
   4353 
   4354 
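         // For a constant key the slot offset is computed statically. Roughly:
         //   offset = FixedArray::kHeaderSize + (key + additional_index) * kPointerSize
         // e.g. on 32-bit MIPS (8-byte header, 4-byte pointers) element 3 lives
         // at field offset 8 + 3 * 4 = 20.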
   4355 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4356   Register value = ToRegister(instr->value());
   4357   Register elements = ToRegister(instr->elements());
   4358   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
   4359       : no_reg;
   4360   Register scratch = scratch0();
   4361   Register store_base = scratch;
   4362   int offset = 0;
   4363 
   4364   // Do the store.
   4365   if (instr->key()->IsConstantOperand()) {
   4366     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4367     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4368     offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
   4369                                            instr->additional_index());
   4370     store_base = elements;
   4371   } else {
    4372     // Even though the HStoreKeyed instruction forces the input
    4373     // representation for the key to be an integer, the input gets replaced
    4374     // during bounds check elimination with the index argument to the bounds
    4375     // check, which can be tagged, so that case must be handled here, too.
   4376     if (instr->hydrogen()->key()->representation().IsSmi()) {
   4377       __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
   4378       __ addu(scratch, elements, scratch);
   4379     } else {
   4380       __ sll(scratch, key, kPointerSizeLog2);
   4381       __ addu(scratch, elements, scratch);
   4382     }
   4383     offset = FixedArray::OffsetOfElementAt(instr->additional_index());
   4384   }
   4385   __ sw(value, FieldMemOperand(store_base, offset));
   4386 
   4387   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4388     SmiCheck check_needed =
   4389         instr->hydrogen()->value()->IsHeapObject()
   4390             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4391     // Compute address of modified element and store it into key register.
   4392     __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
   4393     __ RecordWrite(elements,
   4394                    key,
   4395                    value,
   4396                    GetRAState(),
   4397                    kSaveFPRegs,
   4398                    EMIT_REMEMBERED_SET,
   4399                    check_needed);
   4400   }
   4401 }
   4402 
   4403 
   4404 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
    4405   // Dispatch by case: external array, fast double array, or fast tagged.
   4406   if (instr->is_external()) {
   4407     DoStoreKeyedExternalArray(instr);
   4408   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4409     DoStoreKeyedFixedDoubleArray(instr);
   4410   } else {
   4411     DoStoreKeyedFixedArray(instr);
   4412   }
   4413 }
   4414 
   4415 
   4416 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4417   ASSERT(ToRegister(instr->context()).is(cp));
   4418   ASSERT(ToRegister(instr->object()).is(a2));
   4419   ASSERT(ToRegister(instr->key()).is(a1));
   4420   ASSERT(ToRegister(instr->value()).is(a0));
   4421 
   4422   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   4423       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
   4424       : isolate()->builtins()->KeyedStoreIC_Initialize();
   4425   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4426 }
   4427 
   4428 
   4429 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4430   Register object_reg = ToRegister(instr->object());
   4431   Register scratch = scratch0();
   4432 
   4433   Handle<Map> from_map = instr->original_map();
   4434   Handle<Map> to_map = instr->transitioned_map();
   4435   ElementsKind from_kind = instr->from_kind();
   4436   ElementsKind to_kind = instr->to_kind();
   4437 
   4438   Label not_applicable;
   4439   __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4440   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
   4441 
   4442   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4443     Register new_map_reg = ToRegister(instr->new_map_temp());
   4444     __ li(new_map_reg, Operand(to_map));
   4445     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4446     // Write barrier.
   4447     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
   4448                         scratch, GetRAState(), kDontSaveFPRegs);
   4449   } else {
   4450     ASSERT(ToRegister(instr->context()).is(cp));
   4451     PushSafepointRegistersScope scope(
   4452         this, Safepoint::kWithRegistersAndDoubles);
   4453     __ mov(a0, object_reg);
   4454     __ li(a1, Operand(to_map));
   4455     TransitionElementsKindStub stub(from_kind, to_kind);
   4456     __ CallStub(&stub);
   4457     RecordSafepointWithRegistersAndDoubles(
   4458         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4459   }
   4460   __ bind(&not_applicable);
   4461 }
   4462 
   4463 
   4464 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4465   Register object = ToRegister(instr->object());
   4466   Register temp = ToRegister(instr->temp());
   4467   Label no_memento_found;
   4468   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
   4469                                      ne, &no_memento_found);
   4470   DeoptimizeIf(al, instr->environment());
   4471   __ bind(&no_memento_found);
   4472 }
   4473 
   4474 
   4475 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4476   ASSERT(ToRegister(instr->context()).is(cp));
   4477   if (FLAG_new_string_add) {
   4478     ASSERT(ToRegister(instr->left()).is(a1));
   4479     ASSERT(ToRegister(instr->right()).is(a0));
   4480     NewStringAddStub stub(instr->hydrogen()->flags(),
   4481                           isolate()->heap()->GetPretenureMode());
   4482     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4483   } else {
   4484     __ push(ToRegister(instr->left()));
   4485     __ push(ToRegister(instr->right()));
   4486     StringAddStub stub(instr->hydrogen()->flags());
   4487     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4488   }
   4489 }
   4490 
   4491 
   4492 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4493   class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   4494    public:
   4495     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4496         : LDeferredCode(codegen), instr_(instr) { }
   4497     virtual void Generate() V8_OVERRIDE {
   4498       codegen()->DoDeferredStringCharCodeAt(instr_);
   4499     }
   4500     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4501    private:
   4502     LStringCharCodeAt* instr_;
   4503   };
   4504 
   4505   DeferredStringCharCodeAt* deferred =
   4506       new(zone()) DeferredStringCharCodeAt(this, instr);
   4507   StringCharLoadGenerator::Generate(masm(),
   4508                                     ToRegister(instr->string()),
   4509                                     ToRegister(instr->index()),
   4510                                     ToRegister(instr->result()),
   4511                                     deferred->entry());
   4512   __ bind(deferred->exit());
   4513 }
   4514 
   4515 
   4516 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4517   Register string = ToRegister(instr->string());
   4518   Register result = ToRegister(instr->result());
   4519   Register scratch = scratch0();
   4520 
   4521   // TODO(3095996): Get rid of this. For now, we need to make the
   4522   // result register contain a valid pointer because it is already
   4523   // contained in the register pointer map.
   4524   __ mov(result, zero_reg);
   4525 
   4526   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4527   __ push(string);
   4528   // Push the index as a smi. This is safe because of the checks in
   4529   // DoStringCharCodeAt above.
   4530   if (instr->index()->IsConstantOperand()) {
   4531     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4532     __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
   4533     __ push(scratch);
   4534   } else {
   4535     Register index = ToRegister(instr->index());
   4536     __ SmiTag(index);
   4537     __ push(index);
   4538   }
   4539   CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
   4540                           instr->context());
   4541   __ AssertSmi(v0);
   4542   __ SmiUntag(v0);
   4543   __ StoreToSafepointRegisterSlot(v0, result);
   4544 }
   4545 
   4546 
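         // The fast path below serves char codes up to
         // String::kMaxOneByteCharCode from the single character string cache;
         // a cache miss (undefined entry) or a two-byte code falls through to
         // the deferred runtime call. Roughly the JS behaviour implemented:
         //   String.fromCharCode(65) == "A"   (served from the cache)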
   4547 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4548   class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   4549    public:
   4550     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4551         : LDeferredCode(codegen), instr_(instr) { }
   4552     virtual void Generate() V8_OVERRIDE {
   4553       codegen()->DoDeferredStringCharFromCode(instr_);
   4554     }
   4555     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4556    private:
   4557     LStringCharFromCode* instr_;
   4558   };
   4559 
   4560   DeferredStringCharFromCode* deferred =
   4561       new(zone()) DeferredStringCharFromCode(this, instr);
   4562 
   4563   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   4564   Register char_code = ToRegister(instr->char_code());
   4565   Register result = ToRegister(instr->result());
   4566   Register scratch = scratch0();
   4567   ASSERT(!char_code.is(result));
   4568 
   4569   __ Branch(deferred->entry(), hi,
   4570             char_code, Operand(String::kMaxOneByteCharCode));
   4571   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4572   __ sll(scratch, char_code, kPointerSizeLog2);
   4573   __ Addu(result, result, scratch);
   4574   __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4575   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   4576   __ Branch(deferred->entry(), eq, result, Operand(scratch));
   4577   __ bind(deferred->exit());
   4578 }
   4579 
   4580 
   4581 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4582   Register char_code = ToRegister(instr->char_code());
   4583   Register result = ToRegister(instr->result());
   4584 
   4585   // TODO(3095996): Get rid of this. For now, we need to make the
   4586   // result register contain a valid pointer because it is already
   4587   // contained in the register pointer map.
   4588   __ mov(result, zero_reg);
   4589 
   4590   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4591   __ SmiTag(char_code);
   4592   __ push(char_code);
   4593   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   4594   __ StoreToSafepointRegisterSlot(v0, result);
   4595 }
   4596 
   4597 
   4598 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4599   LOperand* input = instr->value();
   4600   ASSERT(input->IsRegister() || input->IsStackSlot());
   4601   LOperand* output = instr->result();
   4602   ASSERT(output->IsDoubleRegister());
   4603   FPURegister single_scratch = double_scratch0().low();
   4604   if (input->IsStackSlot()) {
   4605     Register scratch = scratch0();
   4606     __ lw(scratch, ToMemOperand(input));
   4607     __ mtc1(scratch, single_scratch);
   4608   } else {
   4609     __ mtc1(ToRegister(input), single_scratch);
   4610   }
   4611   __ cvt_d_w(ToDoubleRegister(output), single_scratch);
   4612 }
   4613 
   4614 
   4615 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
   4616   LOperand* input = instr->value();
   4617   LOperand* output = instr->result();
   4618   Register scratch = scratch0();
   4619 
   4620   ASSERT(output->IsRegister());
   4621   if (!instr->hydrogen()->value()->HasRange() ||
   4622       !instr->hydrogen()->value()->range()->IsInSmiRange()) {
   4623     __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
   4624     DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
   4625   } else {
   4626     __ SmiTag(ToRegister(output), ToRegister(input));
   4627   }
   4628 }
   4629 
   4630 
   4631 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4632   LOperand* input = instr->value();
   4633   LOperand* output = instr->result();
   4634 
   4635   FPURegister dbl_scratch = double_scratch0();
   4636   __ mtc1(ToRegister(input), dbl_scratch);
   4637   __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
   4638 }
   4639 
   4640 
   4641 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
   4642   LOperand* input = instr->value();
   4643   LOperand* output = instr->result();
   4644   if (!instr->hydrogen()->value()->HasRange() ||
   4645       !instr->hydrogen()->value()->range()->IsInSmiRange()) {
   4646     Register scratch = scratch0();
   4647     __ And(scratch, ToRegister(input), Operand(0xc0000000));
   4648     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   4649   }
   4650   __ SmiTag(ToRegister(output), ToRegister(input));
   4651 }
   4652 
   4653 
   4654 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4655   class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   4656    public:
   4657     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4658         : LDeferredCode(codegen), instr_(instr) { }
   4659     virtual void Generate() V8_OVERRIDE {
   4660       codegen()->DoDeferredNumberTagI(instr_,
   4661                                       instr_->value(),
   4662                                       SIGNED_INT32);
   4663     }
   4664     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4665    private:
   4666     LNumberTagI* instr_;
   4667   };
   4668 
   4669   Register src = ToRegister(instr->value());
   4670   Register dst = ToRegister(instr->result());
   4671   Register overflow = scratch0();
   4672 
   4673   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
   4674   __ SmiTagCheckOverflow(dst, src, overflow);
   4675   __ BranchOnOverflow(deferred->entry(), overflow);
   4676   __ bind(deferred->exit());
   4677 }
   4678 
   4679 
   4680 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4681   class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   4682    public:
   4683     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4684         : LDeferredCode(codegen), instr_(instr) { }
   4685     virtual void Generate() V8_OVERRIDE {
   4686       codegen()->DoDeferredNumberTagI(instr_,
   4687                                       instr_->value(),
   4688                                       UNSIGNED_INT32);
   4689     }
   4690     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4691    private:
   4692     LNumberTagU* instr_;
   4693   };
   4694 
   4695   Register input = ToRegister(instr->value());
   4696   Register result = ToRegister(instr->result());
   4697 
   4698   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4699   __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
   4700   __ SmiTag(result, input);
   4701   __ bind(deferred->exit());
   4702 }
   4703 
   4704 
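         // Deferred path for NumberTagI/NumberTagU: the value does not fit in a
         // smi, so box it in a heap number instead. In the signed case, when dst
         // aliases src, the original value is recovered from the overflowed smi
         // by an arithmetic untag plus a sign-bit flip. Illustrative example:
         //   src = 0x50000000; SmiTag gives 0xA0000000 (overflow);
         //   SmiUntag (sra 1) -> 0xD0000000; xor 0x80000000 -> 0x50000000 again.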
   4705 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   4706                                     LOperand* value,
   4707                                     IntegerSignedness signedness) {
   4708   Label slow;
   4709   Register src = ToRegister(value);
   4710   Register dst = ToRegister(instr->result());
   4711   DoubleRegister dbl_scratch = double_scratch0();
   4712 
   4713   // Preserve the value of all registers.
   4714   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4715 
   4716   Label done;
   4717   if (signedness == SIGNED_INT32) {
   4718     // There was overflow, so bits 30 and 31 of the original integer
   4719     // disagree. Try to allocate a heap number in new space and store
   4720     // the value in there. If that fails, call the runtime system.
   4721     if (dst.is(src)) {
   4722       __ SmiUntag(src, dst);
   4723       __ Xor(src, src, Operand(0x80000000));
   4724     }
   4725     __ mtc1(src, dbl_scratch);
   4726     __ cvt_d_w(dbl_scratch, dbl_scratch);
   4727   } else {
   4728     __ mtc1(src, dbl_scratch);
   4729     __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
   4730   }
   4731 
   4732   if (FLAG_inline_new) {
   4733     __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
   4734     __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
   4735     __ Move(dst, t1);
   4736     __ Branch(&done);
   4737   }
   4738 
   4739   // Slow case: Call the runtime system to do the number allocation.
   4740   __ bind(&slow);
   4741 
   4742   // TODO(3095996): Put a valid pointer value in the stack slot where the result
   4743   // register is stored, as this register is in the pointer map, but contains an
   4744   // integer value.
   4745   __ StoreToSafepointRegisterSlot(zero_reg, dst);
   4746   // NumberTagI and NumberTagD use the context from the frame, rather than
   4747   // the environment's HContext or HInlinedContext value.
   4748   // They only call Runtime::kAllocateHeapNumber.
   4749   // The corresponding HChange instructions are added in a phase that does
   4750   // not have easy access to the local context.
   4751   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4752   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4753   RecordSafepointWithRegisters(
   4754       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4755   __ Move(dst, v0);
   4756   __ Subu(dst, dst, kHeapObjectTag);
   4757 
   4758   // Done. Put the value in dbl_scratch into the value of the allocated heap
   4759   // number.
   4760   __ bind(&done);
   4761   __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
   4762   __ Addu(dst, dst, kHeapObjectTag);
   4763   __ StoreToSafepointRegisterSlot(dst, dst);
   4764 }
   4765 
   4766 
   4767 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4768   class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   4769    public:
   4770     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4771         : LDeferredCode(codegen), instr_(instr) { }
   4772     virtual void Generate() V8_OVERRIDE {
   4773       codegen()->DoDeferredNumberTagD(instr_);
   4774     }
   4775     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4776    private:
   4777     LNumberTagD* instr_;
   4778   };
   4779 
   4780   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4781   Register scratch = scratch0();
   4782   Register reg = ToRegister(instr->result());
   4783   Register temp1 = ToRegister(instr->temp());
   4784   Register temp2 = ToRegister(instr->temp2());
   4785 
   4786   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4787   if (FLAG_inline_new) {
   4788     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    4789     // We want the untagged address first for performance.
   4790     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
   4791                           DONT_TAG_RESULT);
   4792   } else {
   4793     __ Branch(deferred->entry());
   4794   }
   4795   __ bind(deferred->exit());
   4796   __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
    4797   // Now that we are finished with the object's real address, tag it.
   4798   __ Addu(reg, reg, kHeapObjectTag);
   4799 }
   4800 
   4801 
   4802 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4803   // TODO(3095996): Get rid of this. For now, we need to make the
   4804   // result register contain a valid pointer because it is already
   4805   // contained in the register pointer map.
   4806   Register reg = ToRegister(instr->result());
   4807   __ mov(reg, zero_reg);
   4808 
   4809   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4810   // NumberTagI and NumberTagD use the context from the frame, rather than
   4811   // the environment's HContext or HInlinedContext value.
   4812   // They only call Runtime::kAllocateHeapNumber.
   4813   // The corresponding HChange instructions are added in a phase that does
   4814   // not have easy access to the local context.
   4815   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4816   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4817   RecordSafepointWithRegisters(
   4818       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4819   __ Subu(v0, v0, kHeapObjectTag);
   4820   __ StoreToSafepointRegisterSlot(v0, reg);
   4821 }
   4822 
   4823 
   4824 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4825   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
   4826   __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
   4827 }
   4828 
   4829 
   4830 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4831   Register scratch = scratch0();
   4832   Register input = ToRegister(instr->value());
   4833   Register result = ToRegister(instr->result());
   4834   if (instr->needs_check()) {
   4835     STATIC_ASSERT(kHeapObjectTag == 1);
    4836     // If the input is a HeapObject, the value of scratch will be non-zero.
   4837     __ And(scratch, input, Operand(kHeapObjectTag));
   4838     __ SmiUntag(result, input);
   4839     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
   4840   } else {
   4841     __ SmiUntag(result, input);
   4842   }
   4843 }
   4844 
   4845 
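         // EmitNumberUntagD converts a tagged value into a double register. With
         // NUMBER_CANDIDATE_IS_ANY_TAGGED it handles smis, heap numbers and
         // (when can_convert_undefined_to_nan is set) undefined-to-NaN, plus an
         // optional deopt on -0; with NUMBER_CANDIDATE_IS_SMI the caller
         // guarantees a smi and only the untag-and-convert path is emitted.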
   4846 void LCodeGen::EmitNumberUntagD(Register input_reg,
   4847                                 DoubleRegister result_reg,
   4848                                 bool can_convert_undefined_to_nan,
   4849                                 bool deoptimize_on_minus_zero,
   4850                                 LEnvironment* env,
   4851                                 NumberUntagDMode mode) {
   4852   Register scratch = scratch0();
   4853   Label convert, load_smi, done;
   4854   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4855     // Smi check.
   4856     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4857     // Heap number map check.
   4858     __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4859     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   4860     if (can_convert_undefined_to_nan) {
   4861       __ Branch(&convert, ne, scratch, Operand(at));
   4862     } else {
   4863       DeoptimizeIf(ne, env, scratch, Operand(at));
   4864     }
   4865     // Load heap number.
   4866     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4867     if (deoptimize_on_minus_zero) {
   4868       __ mfc1(at, result_reg.low());
   4869       __ Branch(&done, ne, at, Operand(zero_reg));
   4870       __ mfc1(scratch, result_reg.high());
   4871       DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
   4872     }
   4873     __ Branch(&done);
   4874     if (can_convert_undefined_to_nan) {
   4875       __ bind(&convert);
    4876       // Convert undefined to NaN; deoptimize on anything else.
   4877       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4878       DeoptimizeIf(ne, env, input_reg, Operand(at));
   4879       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4880       __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4881       __ Branch(&done);
   4882     }
   4883   } else {
   4884     __ SmiUntag(scratch, input_reg);
   4885     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   4886   }
    4887   // Smi to double register conversion.
   4888   __ bind(&load_smi);
   4889   // scratch: untagged value of input_reg
   4890   __ mtc1(scratch, result_reg);
   4891   __ cvt_d_w(result_reg, result_reg);
   4892   __ bind(&done);
   4893 }
   4894 
   4895 
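         // Deferred slow path for tagged->int32. For truncating conversions the
         // oddballs follow JS ToInt32 semantics, roughly:
         //   undefined | 0 == 0,  false | 0 == 0,  true | 0 == 1
         // Non-truncating conversions require an exact heap number and
         // deoptimize on inexact results, or on -0 when kBailoutOnMinusZero is
         // set.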
   4896 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4897   Register input_reg = ToRegister(instr->value());
   4898   Register scratch1 = scratch0();
   4899   Register scratch2 = ToRegister(instr->temp());
   4900   DoubleRegister double_scratch = double_scratch0();
   4901   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4902 
   4903   ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4904   ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4905 
   4906   Label done;
   4907 
   4908   // The input is a tagged HeapObject.
   4909   // Heap number map check.
   4910   __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4911   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    4912   // The 'at' value (heap number map) and the scratch1 map value are used
    4913   // by the checks in both clauses of the if below.
   4914 
   4915   if (instr->truncating()) {
   4916     // Performs a truncating conversion of a floating point number as used by
   4917     // the JS bitwise operations.
   4918     Label no_heap_number, check_bools, check_false;
   4919     __ Branch(&no_heap_number, ne, scratch1, Operand(at));  // HeapNumber map?
   4920     __ mov(scratch2, input_reg);
   4921     __ TruncateHeapNumberToI(input_reg, scratch2);
   4922     __ Branch(&done);
   4923 
   4924     // Check for Oddballs. Undefined/False is converted to zero and True to one
   4925     // for truncating conversions.
   4926     __ bind(&no_heap_number);
   4927     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4928     __ Branch(&check_bools, ne, input_reg, Operand(at));
   4929     ASSERT(ToRegister(instr->result()).is(input_reg));
   4930     __ Branch(USE_DELAY_SLOT, &done);
   4931     __ mov(input_reg, zero_reg);  // In delay slot.
   4932 
   4933     __ bind(&check_bools);
   4934     __ LoadRoot(at, Heap::kTrueValueRootIndex);
   4935     __ Branch(&check_false, ne, scratch2, Operand(at));
   4936     __ Branch(USE_DELAY_SLOT, &done);
   4937     __ li(input_reg, Operand(1));  // In delay slot.
   4938 
   4939     __ bind(&check_false);
   4940     __ LoadRoot(at, Heap::kFalseValueRootIndex);
   4941     DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
   4942     __ Branch(USE_DELAY_SLOT, &done);
   4943     __ mov(input_reg, zero_reg);  // In delay slot.
   4944   } else {
   4945     // Deoptimize if we don't have a heap number.
   4946     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
   4947 
   4948     // Load the double value.
   4949     __ ldc1(double_scratch,
   4950             FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4951 
   4952     Register except_flag = scratch2;
   4953     __ EmitFPUTruncate(kRoundToZero,
   4954                        input_reg,
   4955                        double_scratch,
   4956                        scratch1,
   4957                        double_scratch2,
   4958                        except_flag,
   4959                        kCheckForInexactConversion);
   4960 
   4961     // Deopt if the operation did not succeed.
   4962     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   4963 
   4964     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4965       __ Branch(&done, ne, input_reg, Operand(zero_reg));
   4966 
   4967       __ mfc1(scratch1, double_scratch.high());
   4968       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   4969       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   4970     }
   4971   }
   4972   __ bind(&done);
   4973 }
   4974 
   4975 
   4976 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4977   class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   4978    public:
   4979     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4980         : LDeferredCode(codegen), instr_(instr) { }
   4981     virtual void Generate() V8_OVERRIDE {
   4982       codegen()->DoDeferredTaggedToI(instr_);
   4983     }
   4984     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4985    private:
   4986     LTaggedToI* instr_;
   4987   };
   4988 
   4989   LOperand* input = instr->value();
   4990   ASSERT(input->IsRegister());
   4991   ASSERT(input->Equals(instr->result()));
   4992 
   4993   Register input_reg = ToRegister(input);
   4994 
   4995   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4996     __ SmiUntag(input_reg);
   4997   } else {
   4998     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4999 
   5000     // Let the deferred code handle the HeapObject case.
   5001     __ JumpIfNotSmi(input_reg, deferred->entry());
   5002 
   5003     // Smi to int32 conversion.
   5004     __ SmiUntag(input_reg);
   5005     __ bind(deferred->exit());
   5006   }
   5007 }
   5008 
   5009 
   5010 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   5011   LOperand* input = instr->value();
   5012   ASSERT(input->IsRegister());
   5013   LOperand* result = instr->result();
   5014   ASSERT(result->IsDoubleRegister());
   5015 
   5016   Register input_reg = ToRegister(input);
   5017   DoubleRegister result_reg = ToDoubleRegister(result);
   5018 
   5019   HValue* value = instr->hydrogen()->value();
   5020   NumberUntagDMode mode = value->representation().IsSmi()
   5021       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   5022 
   5023   EmitNumberUntagD(input_reg, result_reg,
   5024                    instr->hydrogen()->can_convert_undefined_to_nan(),
   5025                    instr->hydrogen()->deoptimize_on_minus_zero(),
   5026                    instr->environment(),
   5027                    mode);
   5028 }
   5029 
   5030 
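         // Non-truncating double->int32 deoptimizes on inexact conversions. The
         // minus-zero check relies on the IEEE bit pattern: -0.0 truncates to an
         // integer part of zero, but its high word is 0x80000000 (sign bit set),
         // which is what the mfc1/And sequence below inspects.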
   5031 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   5032   Register result_reg = ToRegister(instr->result());
   5033   Register scratch1 = scratch0();
   5034   DoubleRegister double_input = ToDoubleRegister(instr->value());
   5035 
   5036   if (instr->truncating()) {
   5037     __ TruncateDoubleToI(result_reg, double_input);
   5038   } else {
   5039     Register except_flag = LCodeGen::scratch1();
   5040 
   5041     __ EmitFPUTruncate(kRoundToMinusInf,
   5042                        result_reg,
   5043                        double_input,
   5044                        scratch1,
   5045                        double_scratch0(),
   5046                        except_flag,
   5047                        kCheckForInexactConversion);
   5048 
   5049     // Deopt if the operation did not succeed (except_flag != 0).
   5050     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   5051 
   5052     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5053       Label done;
   5054       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   5055       __ mfc1(scratch1, double_input.high());
   5056       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   5057       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   5058       __ bind(&done);
   5059     }
   5060   }
   5061 }
   5062 
   5063 
   5064 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   5065   Register result_reg = ToRegister(instr->result());
   5066   Register scratch1 = LCodeGen::scratch0();
   5067   DoubleRegister double_input = ToDoubleRegister(instr->value());
   5068 
   5069   if (instr->truncating()) {
   5070     __ TruncateDoubleToI(result_reg, double_input);
   5071   } else {
   5072     Register except_flag = LCodeGen::scratch1();
   5073 
   5074     __ EmitFPUTruncate(kRoundToMinusInf,
   5075                        result_reg,
   5076                        double_input,
   5077                        scratch1,
   5078                        double_scratch0(),
   5079                        except_flag,
   5080                        kCheckForInexactConversion);
   5081 
   5082     // Deopt if the operation did not succeed (except_flag != 0).
   5083     DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
   5084 
   5085     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5086       Label done;
   5087       __ Branch(&done, ne, result_reg, Operand(zero_reg));
   5088       __ mfc1(scratch1, double_input.high());
   5089       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
   5090       DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
   5091       __ bind(&done);
   5092     }
   5093   }
   5094   __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
   5095   DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
   5096 }
   5097 
   5098 
   5099 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   5100   LOperand* input = instr->value();
   5101   __ SmiTst(ToRegister(input), at);
   5102   DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
   5103 }
   5104 
   5105 
   5106 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   5107   if (!instr->hydrogen()->value()->IsHeapObject()) {
   5108     LOperand* input = instr->value();
   5109     __ SmiTst(ToRegister(input), at);
   5110     DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
   5111   }
   5112 }
   5113 
   5114 
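         // Instance type checks are either an interval test on the instance type
         // or a mask-and-tag test on it. For a power-of-two mask a single And
         // plus a branch against zero suffices: with tag == 0 the check
         // deoptimizes when the bit is set, otherwise when it is clear.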
   5115 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   5116   Register input = ToRegister(instr->value());
   5117   Register scratch = scratch0();
   5118 
   5119   __ GetObjectType(input, scratch, scratch);
   5120 
   5121   if (instr->hydrogen()->is_interval_check()) {
   5122     InstanceType first;
   5123     InstanceType last;
   5124     instr->hydrogen()->GetCheckInterval(&first, &last);
   5125 
    5126     // If there is only one type in the interval, check for equality.
   5127     if (first == last) {
   5128       DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
   5129     } else {
   5130       DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
   5131       // Omit check for the last type.
   5132       if (last != LAST_TYPE) {
   5133         DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
   5134       }
   5135     }
   5136   } else {
   5137     uint8_t mask;
   5138     uint8_t tag;
   5139     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   5140 
   5141     if (IsPowerOf2(mask)) {
   5142       ASSERT(tag == 0 || IsPowerOf2(tag));
   5143       __ And(at, scratch, mask);
   5144       DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
   5145           at, Operand(zero_reg));
   5146     } else {
   5147       __ And(scratch, scratch, Operand(mask));
   5148       DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
   5149     }
   5150   }
   5151 }
   5152 
   5153 
   5154 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   5155   Register reg = ToRegister(instr->value());
   5156   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   5157   AllowDeferredHandleDereference smi_check;
   5158   if (isolate()->heap()->InNewSpace(*object)) {
   5160     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   5161     __ li(at, Operand(Handle<Object>(cell)));
   5162     __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
   5163     DeoptimizeIf(ne, instr->environment(), reg,
   5164                  Operand(at));
   5165   } else {
   5166     DeoptimizeIf(ne, instr->environment(), reg,
   5167                  Operand(object));
   5168   }
   5169 }
   5170 
   5171 
   5172 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5173   {
   5174     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   5175     __ push(object);
   5176     __ mov(cp, zero_reg);
   5177     __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
   5178     RecordSafepointWithRegisters(
   5179         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   5180     __ StoreToSafepointRegisterSlot(v0, scratch0());
   5181   }
   5182   __ SmiTst(scratch0(), at);
   5183   DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
   5184 }
   5185 
   5186 
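         // The object's map is checked against every map in the set, branching
         // to success on the first match. If one of the maps is a migration
         // target, the final mismatch jumps to deferred code that calls
         // Runtime::kMigrateInstance (deoptimizing when migration fails, i.e. a
         // smi comes back) rather than deoptimizing immediately.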
   5187 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5188   class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   5189    public:
   5190     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5191         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5192       SetExit(check_maps());
   5193     }
   5194     virtual void Generate() V8_OVERRIDE {
   5195       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5196     }
   5197     Label* check_maps() { return &check_maps_; }
   5198     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   5199    private:
   5200     LCheckMaps* instr_;
   5201     Label check_maps_;
   5202     Register object_;
   5203   };
   5204 
   5205   if (instr->hydrogen()->CanOmitMapChecks()) return;
   5206   Register map_reg = scratch0();
   5207   LOperand* input = instr->value();
   5208   ASSERT(input->IsRegister());
   5209   Register reg = ToRegister(input);
   5210   __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   5211 
   5212   DeferredCheckMaps* deferred = NULL;
   5213   if (instr->hydrogen()->has_migration_target()) {
   5214     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5215     __ bind(deferred->check_maps());
   5216   }
   5217 
   5218   UniqueSet<Map> map_set = instr->hydrogen()->map_set();
   5219   Label success;
   5220   for (int i = 0; i < map_set.size() - 1; i++) {
   5221     Handle<Map> map = map_set.at(i).handle();
   5222     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
   5223   }
   5224   Handle<Map> map = map_set.at(map_set.size() - 1).handle();
   5225   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
   5226   if (instr->hydrogen()->has_migration_target()) {
   5227     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
   5228   } else {
   5229     DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
   5230   }
   5231 
   5232   __ bind(&success);
   5233 }
   5234 
   5235 
   5236 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5237   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5238   Register result_reg = ToRegister(instr->result());
   5239   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5240   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
   5241 }
   5242 
   5243 
   5244 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5245   Register unclamped_reg = ToRegister(instr->unclamped());
   5246   Register result_reg = ToRegister(instr->result());
   5247   __ ClampUint8(result_reg, unclamped_reg);
   5248 }
   5249 
   5250 
   5251 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5252   Register scratch = scratch0();
   5253   Register input_reg = ToRegister(instr->unclamped());
   5254   Register result_reg = ToRegister(instr->result());
   5255   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5256   Label is_smi, done, heap_number;
   5257 
   5258   // Both smi and heap number cases are handled.
   5259   __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
   5260 
   5261   // Check for heap number
   5262   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5263   __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
   5264 
   5265   // Check for undefined. Undefined is converted to zero for clamping
   5266   // conversions.
   5267   DeoptimizeIf(ne, instr->environment(), input_reg,
   5268                Operand(factory()->undefined_value()));
   5269   __ mov(result_reg, zero_reg);
   5270   __ jmp(&done);
   5271 
   5272   // Heap number
   5273   __ bind(&heap_number);
   5274   __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
   5275                                              HeapNumber::kValueOffset));
   5276   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
   5277   __ jmp(&done);
   5278 
   5279   __ bind(&is_smi);
   5280   __ ClampUint8(result_reg, scratch);
   5281 
   5282   __ bind(&done);
   5283 }
   5284 
   5285 
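         // DoAllocate attempts an inline bump-pointer allocation and falls back
         // to a deferred runtime allocation. With MustPrefillWithFiller(), the
         // new space is filled with one-pointer filler maps from the highest
         // word down to offset 0, so the heap stays iterable before the caller
         // initializes the object.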
   5286 void LCodeGen::DoAllocate(LAllocate* instr) {
   5287   class DeferredAllocate V8_FINAL : public LDeferredCode {
   5288    public:
   5289     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5290         : LDeferredCode(codegen), instr_(instr) { }
   5291     virtual void Generate() V8_OVERRIDE {
   5292       codegen()->DoDeferredAllocate(instr_);
   5293     }
   5294     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   5295    private:
   5296     LAllocate* instr_;
   5297   };
   5298 
   5299   DeferredAllocate* deferred =
   5300       new(zone()) DeferredAllocate(this, instr);
   5301 
   5302   Register result = ToRegister(instr->result());
   5303   Register scratch = ToRegister(instr->temp1());
   5304   Register scratch2 = ToRegister(instr->temp2());
   5305 
   5306   // Allocate memory for the object.
   5307   AllocationFlags flags = TAG_OBJECT;
   5308   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5309     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5310   }
   5311   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5312     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5313     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5314     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   5315   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5316     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5317     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   5318   }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

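  // Fill the allocation with one-word filler maps, walking backwards from
  // the last word, so the heap stays iterable if the object is not fully
  // initialized before the next GC-safe point. Roughly (illustrative):
  //   for (int i = size - kPointerSize; i >= 0; i -= kPointerSize)
  //     *(result + i) = one_pointer_filler_map;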
  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Subu(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
    __ Addu(result, result, Operand(kHeapObjectTag));
  }
}


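// Slow path for DoAllocate: the size and the allocation flags are passed to
// the runtime as smis, since the runtime expects tagged arguments.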
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ li(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create the regexp literal using a runtime function.
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (The copy loop is unrolled once for better throughput.)
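  // In effect (illustrative sketch of the word-wise copy below):
  //   for (int i = 0; i < size; i += kPointerSize)
  //     new_regexp[i] = boilerplate[i];
  // Two words are copied per iteration; an odd trailing word, if any, is
  // handled after the loop.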
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


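// Emits the type checks for a typeof comparison against type_name. The
// operands for the final comparison are returned through cmp1 and cmp2; the
// returned condition tells the caller how to branch on them. kNoCondition
// means the generated code has already branched to true_label or
// false_label itself.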
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function makes heavy use of branch delay slots, filling them with
  // loads of values that are safe to load regardless of the type held in
  // the input register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object, so its map's bit field can be loaded even if we
    // take the other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    cmp1 = scratch;
    cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // map is still valid, so the bit field can be loaded in the delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


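// Loads the marker of the calling frame into temp1, skipping over an
// arguments adaptor frame if one is present. The caller compares the marker
// against Smi::FromInt(StackFrame::CONSTRUCT).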
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


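// Pads the code with nops until at least space_needed bytes separate this
// point from the last lazy-deopt point, so the deoptimizer can later patch
// in a call sequence without clobbering a neighboring patch site. For
// example, if space_needed were 4 * Assembler::kInstrSize and only one
// instruction had been emitted since the last lazy-deopt point, three nops
// would be emitted.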
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->IsStub()) return;
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


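// Stack checks come in two flavors: at function entry, the check calls the
// StackCheck builtin inline when sp is below the stack limit; at a
// backwards branch, the overflow case is handled by a deferred runtime call
// to Runtime::kStackGuard (see DoDeferredStackCheck above).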
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


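// Prepares a for-in receiver: deoptimizes on undefined, null, smi, and
// proxy receivers, then uses the enum cache when it is valid and falls back
// to Runtime::kGetPropertyNamesFast otherwise. Either way the result must
// be the receiver's map (checked against the meta map); anything else
// deoptimizes.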
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}


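// Loads the for-in enum cache from the map's instance descriptors. An
// EnumLength of zero means there is nothing to enumerate, so the empty
// fixed array is used instead; a zero cache entry means the cache is
// invalid and forces a deopt.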
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}


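// Loads a field given its smi-encoded index: a non-negative index selects
// an in-object field relative to JSObject::kHeaderSize, while a negative
// index selects a slot in the out-of-object properties array.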
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal