      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/v8.h"
      6 
      7 #include "src/arm/lithium-codegen-arm.h"
      8 #include "src/arm/lithium-gap-resolver-arm.h"
      9 #include "src/base/bits.h"
     10 #include "src/code-factory.h"
     11 #include "src/code-stubs.h"
     12 #include "src/hydrogen-osr.h"
     13 #include "src/ic/ic.h"
     14 #include "src/ic/stub-cache.h"
     15 
     16 namespace v8 {
     17 namespace internal {
     18 
     19 
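         // A CallWrapper that records a safepoint for the given pointer map
         // immediately after each wrapped call.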
     20 class SafepointGenerator FINAL : public CallWrapper {
     21  public:
     22   SafepointGenerator(LCodeGen* codegen,
     23                      LPointerMap* pointers,
     24                      Safepoint::DeoptMode mode)
     25       : codegen_(codegen),
     26         pointers_(pointers),
     27         deopt_mode_(mode) { }
     28   virtual ~SafepointGenerator() {}
     29 
     30   virtual void BeforeCall(int call_size) const OVERRIDE {}
     31 
     32   virtual void AfterCall() const OVERRIDE {
     33     codegen_->RecordSafepoint(pointers_, deopt_mode_);
     34   }
     35 
     36  private:
     37   LCodeGen* codegen_;
     38   LPointerMap* pointers_;
     39   Safepoint::DeoptMode deopt_mode_;
     40 };
     41 
     42 
     43 #define __ masm()->
     44 
     45 bool LCodeGen::GenerateCode() {
     46   LPhase phase("Z_Code generation", chunk());
     47   DCHECK(is_unused());
     48   status_ = GENERATING;
     49 
     50   // Open a frame scope to indicate that there is a frame on the stack.  The
     51   // NONE indicates that the scope shouldn't actually generate code to set up
     52   // the frame (that is done in GeneratePrologue).
     53   FrameScope frame_scope(masm_, StackFrame::NONE);
     54 
     55   return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
     56          GenerateJumpTable() && GenerateSafepointTable();
     57 }
     58 
     59 
     60 void LCodeGen::FinishCode(Handle<Code> code) {
     61   DCHECK(is_done());
     62   code->set_stack_slots(GetStackSlotCount());
     63   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
     64   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
     65   PopulateDeoptimizationData(code);
     66 }
     67 
     68 
     69 void LCodeGen::SaveCallerDoubles() {
     70   DCHECK(info()->saves_caller_doubles());
     71   DCHECK(NeedsEagerFrame());
     72   Comment(";;; Save clobbered callee double registers");
     73   int count = 0;
     74   BitVector* doubles = chunk()->allocated_double_registers();
     75   BitVector::Iterator save_iterator(doubles);
     76   while (!save_iterator.Done()) {
     77     __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
     78             MemOperand(sp, count * kDoubleSize));
     79     save_iterator.Advance();
     80     count++;
     81   }
     82 }
     83 
     84 
     85 void LCodeGen::RestoreCallerDoubles() {
     86   DCHECK(info()->saves_caller_doubles());
     87   DCHECK(NeedsEagerFrame());
     88   Comment(";;; Restore clobbered callee double registers");
     89   BitVector* doubles = chunk()->allocated_double_registers();
     90   BitVector::Iterator save_iterator(doubles);
     91   int count = 0;
     92   while (!save_iterator.Done()) {
     93     __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
      94             MemOperand(sp, count * kDoubleSize));
     95     save_iterator.Advance();
     96     count++;
     97   }
     98 }
     99 
    100 
    101 bool LCodeGen::GeneratePrologue() {
    102   DCHECK(is_generating());
    103 
    104   if (info()->IsOptimizing()) {
    105     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
    106 
    107 #ifdef DEBUG
    108     if (strlen(FLAG_stop_at) > 0 &&
    109         info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
    110       __ stop("stop_at");
    111     }
    112 #endif
    113 
    114     // r1: Callee's JS function.
    115     // cp: Callee's context.
    116     // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
    117     // fp: Caller's frame pointer.
    118     // lr: Caller's pc.
    119 
    120     // Sloppy mode functions and builtins need to replace the receiver with the
    121     // global proxy when called as functions (without an explicit receiver
    122     // object).
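             // For example, a plain call f() (rather than o.f()) passes 'undefined'
             // as the receiver; the code below swaps it for the global proxy so that
             // sloppy-mode code always sees an object.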
    123     if (info_->this_has_uses() &&
    124         info_->strict_mode() == SLOPPY &&
    125         !info_->is_native()) {
    126       Label ok;
    127       int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
    128       __ ldr(r2, MemOperand(sp, receiver_offset));
    129       __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
    130       __ b(ne, &ok);
    131 
    132       __ ldr(r2, GlobalObjectOperand());
    133       __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
    134 
    135       __ str(r2, MemOperand(sp, receiver_offset));
    136 
    137       __ bind(&ok);
    138     }
    139   }
    140 
    141   info()->set_prologue_offset(masm_->pc_offset());
    142   if (NeedsEagerFrame()) {
    143     if (info()->IsStub()) {
    144       __ StubPrologue();
    145     } else {
    146       __ Prologue(info()->IsCodePreAgingActive());
    147     }
    148     frame_is_built_ = true;
    149     info_->AddNoFrameRange(0, masm_->pc_offset());
    150   }
    151 
    152   // Reserve space for the stack slots needed by the code.
    153   int slots = GetStackSlotCount();
    154   if (slots > 0) {
    155     if (FLAG_debug_code) {
     156       __ sub(sp, sp, Operand(slots * kPointerSize));
    157       __ push(r0);
    158       __ push(r1);
     159       __ add(r0, sp, Operand(slots * kPointerSize));
    160       __ mov(r1, Operand(kSlotsZapValue));
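               // Fill the freshly reserved slots with kSlotsZapValue so that reads of
               // uninitialized slots are easy to spot. The 2 * kPointerSize offset in
               // the store below compensates for r0 and r1 pushed above.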
    161       Label loop;
    162       __ bind(&loop);
    163       __ sub(r0, r0, Operand(kPointerSize));
    164       __ str(r1, MemOperand(r0, 2 * kPointerSize));
    165       __ cmp(r0, sp);
    166       __ b(ne, &loop);
    167       __ pop(r1);
    168       __ pop(r0);
    169     } else {
     170       __ sub(sp, sp, Operand(slots * kPointerSize));
    171     }
    172   }
    173 
    174   if (info()->saves_caller_doubles()) {
    175     SaveCallerDoubles();
    176   }
    177 
    178   // Possibly allocate a local context.
    179   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    180   if (heap_slots > 0) {
    181     Comment(";;; Allocate local context");
    182     bool need_write_barrier = true;
    183     // Argument to NewContext is the function, which is in r1.
    184     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
    185       FastNewContextStub stub(isolate(), heap_slots);
    186       __ CallStub(&stub);
    187       // Result of FastNewContextStub is always in new space.
    188       need_write_barrier = false;
    189     } else {
    190       __ push(r1);
    191       __ CallRuntime(Runtime::kNewFunctionContext, 1);
    192     }
    193     RecordSafepoint(Safepoint::kNoLazyDeopt);
    194     // Context is returned in both r0 and cp.  It replaces the context
    195     // passed to us.  It's saved in the stack and kept live in cp.
    196     __ mov(cp, r0);
    197     __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    198     // Copy any necessary parameters into the context.
    199     int num_parameters = scope()->num_parameters();
    200     for (int i = 0; i < num_parameters; i++) {
    201       Variable* var = scope()->parameter(i);
    202       if (var->IsContextSlot()) {
    203         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
    204             (num_parameters - 1 - i) * kPointerSize;
    205         // Load parameter from stack.
    206         __ ldr(r0, MemOperand(fp, parameter_offset));
    207         // Store it in the context.
    208         MemOperand target = ContextOperand(cp, var->index());
    209         __ str(r0, target);
    210         // Update the write barrier. This clobbers r3 and r0.
    211         if (need_write_barrier) {
    212           __ RecordWriteContextSlot(
    213               cp,
    214               target.offset(),
    215               r0,
    216               r3,
    217               GetLinkRegisterState(),
    218               kSaveFPRegs);
    219         } else if (FLAG_debug_code) {
    220           Label done;
    221           __ JumpIfInNewSpace(cp, r0, &done);
    222           __ Abort(kExpectedNewSpaceObject);
    223           __ bind(&done);
    224         }
    225       }
    226     }
    227     Comment(";;; End allocate local context");
    228   }
    229 
    230   // Trace the call.
    231   if (FLAG_trace && info()->IsOptimizing()) {
    232     // We have not executed any compiled code yet, so cp still holds the
    233     // incoming context.
    234     __ CallRuntime(Runtime::kTraceEnter, 0);
    235   }
    236   return !is_aborted();
    237 }
    238 
    239 
    240 void LCodeGen::GenerateOsrPrologue() {
    241   // Generate the OSR entry prologue at the first unknown OSR value, or if there
    242   // are none, at the OSR entrypoint instruction.
    243   if (osr_pc_offset_ >= 0) return;
    244 
    245   osr_pc_offset_ = masm()->pc_offset();
    246 
    247   // Adjust the frame size, subsuming the unoptimized frame into the
    248   // optimized frame.
    249   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
    250   DCHECK(slots >= 0);
    251   __ sub(sp, sp, Operand(slots * kPointerSize));
    252 }
    253 
    254 
    255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
    256   if (instr->IsCall()) {
    257     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    258   }
    259   if (!instr->IsLazyBailout() && !instr->IsGap()) {
    260     safepoints_.BumpLastLazySafepointIndex();
    261   }
    262 }
    263 
    264 
    265 bool LCodeGen::GenerateDeferredCode() {
    266   DCHECK(is_generating());
    267   if (deferred_.length() > 0) {
    268     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    269       LDeferredCode* code = deferred_[i];
    270 
    271       HValue* value =
    272           instructions_->at(code->instruction_index())->hydrogen_value();
    273       RecordAndWritePosition(
    274           chunk()->graph()->SourcePositionToScriptPosition(value->position()));
    275 
    276       Comment(";;; <@%d,#%d> "
    277               "-------------------- Deferred %s --------------------",
    278               code->instruction_index(),
    279               code->instr()->hydrogen_value()->id(),
    280               code->instr()->Mnemonic());
    281       __ bind(code->entry());
    282       if (NeedsDeferredFrame()) {
    283         Comment(";;; Build frame");
    284         DCHECK(!frame_is_built_);
    285         DCHECK(info()->IsStub());
    286         frame_is_built_ = true;
    287         __ PushFixedFrame();
    288         __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
    289         __ push(scratch0());
    290         __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    291         Comment(";;; Deferred code");
    292       }
    293       code->Generate();
    294       if (NeedsDeferredFrame()) {
    295         Comment(";;; Destroy frame");
    296         DCHECK(frame_is_built_);
    297         __ pop(ip);
    298         __ PopFixedFrame();
    299         frame_is_built_ = false;
    300       }
    301       __ jmp(code->exit());
    302     }
    303   }
    304 
     305   // Force constant pool emission at the end of the deferred code to make
     306   // sure that no constant pools are emitted after it.
    307   masm()->CheckConstPool(true, false);
    308 
    309   return !is_aborted();
    310 }
    311 
    312 
    313 bool LCodeGen::GenerateJumpTable() {
     314   // Check that the jump table is accessible from everywhere in the function
     315   // code, i.e. that offsets to the table can be encoded in the 24-bit signed
     316   // immediate of a branch instruction.
     317   // To simplify, we consider the code size from the first instruction to the
     318   // end of the jump table. We also don't consider the pc load delta.
     319   // Each entry in the jump table generates one instruction and inlines one
     320   // 32-bit data word after it.
    321   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
    322                 jump_table_.length() * 7)) {
    323     Abort(kGeneratedCodeIsTooLarge);
    324   }
    325 
    326   if (jump_table_.length() > 0) {
    327     Label needs_frame, call_deopt_entry;
    328 
    329     Comment(";;; -------------------- Jump table --------------------");
    330     Address base = jump_table_[0].address;
    331 
    332     Register entry_offset = scratch0();
    333 
    334     int length = jump_table_.length();
    335     for (int i = 0; i < length; i++) {
    336       Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    337       __ bind(&table_entry->label);
    338 
    339       DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
    340       Address entry = table_entry->address;
    341       DeoptComment(table_entry->reason);
    342 
    343       // Second-level deopt table entries are contiguous and small, so instead
    344       // of loading the full, absolute address of each one, load an immediate
    345       // offset which will be added to the base address later.
    346       __ mov(entry_offset, Operand(entry - base));
    347 
    348       if (table_entry->needs_frame) {
    349         DCHECK(!info()->saves_caller_doubles());
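                 // The frame-building sequence below is emitted only once, at the
                 // first entry that needs a frame; later entries just branch to it.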
    350         if (needs_frame.is_bound()) {
    351           __ b(&needs_frame);
    352         } else {
    353           __ bind(&needs_frame);
    354           Comment(";;; call deopt with frame");
    355           __ PushFixedFrame();
    356           // This variant of deopt can only be used with stubs. Since we don't
    357           // have a function pointer to install in the stack frame that we're
    358           // building, install a special marker there instead.
    359           DCHECK(info()->IsStub());
    360           __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
    361           __ push(ip);
    362           __ add(fp, sp,
    363                  Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    364           __ bind(&call_deopt_entry);
    365           // Add the base address to the offset previously loaded in
    366           // entry_offset.
    367           __ add(entry_offset, entry_offset,
    368                  Operand(ExternalReference::ForDeoptEntry(base)));
    369           __ blx(entry_offset);
    370         }
    371 
    372         masm()->CheckConstPool(false, false);
    373       } else {
    374         // The last entry can fall through into `call_deopt_entry`, avoiding a
    375         // branch.
    376         bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
    377 
    378         if (need_branch) __ b(&call_deopt_entry);
    379 
    380         masm()->CheckConstPool(false, !need_branch);
    381       }
    382     }
    383 
    384     if (!call_deopt_entry.is_bound()) {
    385       Comment(";;; call deopt");
    386       __ bind(&call_deopt_entry);
    387 
    388       if (info()->saves_caller_doubles()) {
    389         DCHECK(info()->IsStub());
    390         RestoreCallerDoubles();
    391       }
    392 
    393       // Add the base address to the offset previously loaded in entry_offset.
    394       __ add(entry_offset, entry_offset,
    395              Operand(ExternalReference::ForDeoptEntry(base)));
    396       __ blx(entry_offset);
    397     }
    398   }
    399 
     400   // Force constant pool emission at the end of the deopt jump table to make
     401   // sure that no constant pools are emitted after it.
    402   masm()->CheckConstPool(true, false);
    403 
    404   // The deoptimization jump table is the last part of the instruction
    405   // sequence. Mark the generated code as done unless we bailed out.
    406   if (!is_aborted()) status_ = DONE;
    407   return !is_aborted();
    408 }
    409 
    410 
    411 bool LCodeGen::GenerateSafepointTable() {
    412   DCHECK(is_done());
    413   safepoints_.Emit(masm(), GetStackSlotCount());
    414   return !is_aborted();
    415 }
    416 
    417 
    418 Register LCodeGen::ToRegister(int index) const {
    419   return Register::FromAllocationIndex(index);
    420 }
    421 
    422 
    423 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
    424   return DwVfpRegister::FromAllocationIndex(index);
    425 }
    426 
    427 
    428 Register LCodeGen::ToRegister(LOperand* op) const {
    429   DCHECK(op->IsRegister());
    430   return ToRegister(op->index());
    431 }
    432 
    433 
    434 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
    435   if (op->IsRegister()) {
    436     return ToRegister(op->index());
    437   } else if (op->IsConstantOperand()) {
    438     LConstantOperand* const_op = LConstantOperand::cast(op);
    439     HConstant* constant = chunk_->LookupConstant(const_op);
    440     Handle<Object> literal = constant->handle(isolate());
    441     Representation r = chunk_->LookupLiteralRepresentation(const_op);
    442     if (r.IsInteger32()) {
    443       DCHECK(literal->IsNumber());
    444       __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    445     } else if (r.IsDouble()) {
    446       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    447     } else {
    448       DCHECK(r.IsSmiOrTagged());
    449       __ Move(scratch, literal);
    450     }
    451     return scratch;
    452   } else if (op->IsStackSlot()) {
    453     __ ldr(scratch, ToMemOperand(op));
    454     return scratch;
    455   }
    456   UNREACHABLE();
    457   return scratch;
    458 }
    459 
    460 
    461 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
    462   DCHECK(op->IsDoubleRegister());
    463   return ToDoubleRegister(op->index());
    464 }
    465 
    466 
    467 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
    468                                                SwVfpRegister flt_scratch,
    469                                                DwVfpRegister dbl_scratch) {
    470   if (op->IsDoubleRegister()) {
    471     return ToDoubleRegister(op->index());
    472   } else if (op->IsConstantOperand()) {
    473     LConstantOperand* const_op = LConstantOperand::cast(op);
    474     HConstant* constant = chunk_->LookupConstant(const_op);
    475     Handle<Object> literal = constant->handle(isolate());
    476     Representation r = chunk_->LookupLiteralRepresentation(const_op);
    477     if (r.IsInteger32()) {
    478       DCHECK(literal->IsNumber());
    479       __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
    480       __ vmov(flt_scratch, ip);
    481       __ vcvt_f64_s32(dbl_scratch, flt_scratch);
    482       return dbl_scratch;
    483     } else if (r.IsDouble()) {
    484       Abort(kUnsupportedDoubleImmediate);
    485     } else if (r.IsTagged()) {
    486       Abort(kUnsupportedTaggedImmediate);
    487     }
    488   } else if (op->IsStackSlot()) {
    489     // TODO(regis): Why is vldr not taking a MemOperand?
    490     // __ vldr(dbl_scratch, ToMemOperand(op));
    491     MemOperand mem_op = ToMemOperand(op);
    492     __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    493     return dbl_scratch;
    494   }
    495   UNREACHABLE();
    496   return dbl_scratch;
    497 }
    498 
    499 
    500 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
    501   HConstant* constant = chunk_->LookupConstant(op);
    502   DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
    503   return constant->handle(isolate());
    504 }
    505 
    506 
    507 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
    508   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
    509 }
    510 
    511 
    512 bool LCodeGen::IsSmi(LConstantOperand* op) const {
    513   return chunk_->LookupLiteralRepresentation(op).IsSmi();
    514 }
    515 
    516 
    517 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
    518   return ToRepresentation(op, Representation::Integer32());
    519 }
    520 
    521 
    522 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
    523                                    const Representation& r) const {
    524   HConstant* constant = chunk_->LookupConstant(op);
    525   int32_t value = constant->Integer32Value();
    526   if (r.IsInteger32()) return value;
    527   DCHECK(r.IsSmiOrTagged());
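           // On a 32-bit target a Smi is the integer shifted left by one with a zero
           // tag bit (e.g. 5 is encoded as 0xA), so the cast below simply returns
           // that raw bit pattern.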
    528   return reinterpret_cast<int32_t>(Smi::FromInt(value));
    529 }
    530 
    531 
    532 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
    533   HConstant* constant = chunk_->LookupConstant(op);
    534   return Smi::FromInt(constant->Integer32Value());
    535 }
    536 
    537 
    538 double LCodeGen::ToDouble(LConstantOperand* op) const {
    539   HConstant* constant = chunk_->LookupConstant(op);
    540   DCHECK(constant->HasDoubleValue());
    541   return constant->DoubleValue();
    542 }
    543 
    544 
    545 Operand LCodeGen::ToOperand(LOperand* op) {
    546   if (op->IsConstantOperand()) {
    547     LConstantOperand* const_op = LConstantOperand::cast(op);
    548     HConstant* constant = chunk()->LookupConstant(const_op);
    549     Representation r = chunk_->LookupLiteralRepresentation(const_op);
    550     if (r.IsSmi()) {
    551       DCHECK(constant->HasSmiValue());
    552       return Operand(Smi::FromInt(constant->Integer32Value()));
    553     } else if (r.IsInteger32()) {
    554       DCHECK(constant->HasInteger32Value());
    555       return Operand(constant->Integer32Value());
    556     } else if (r.IsDouble()) {
    557       Abort(kToOperandUnsupportedDoubleImmediate);
    558     }
    559     DCHECK(r.IsTagged());
    560     return Operand(constant->handle(isolate()));
    561   } else if (op->IsRegister()) {
    562     return Operand(ToRegister(op));
    563   } else if (op->IsDoubleRegister()) {
    564     Abort(kToOperandIsDoubleRegisterUnimplemented);
    565     return Operand::Zero();
    566   }
    567   // Stack slots not implemented, use ToMemOperand instead.
    568   UNREACHABLE();
    569   return Operand::Zero();
    570 }
    571 
    572 
    573 static int ArgumentsOffsetWithoutFrame(int index) {
    574   DCHECK(index < 0);
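           // For example, index -1 maps to offset 0 from sp and index -2 to
           // kPointerSize.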
    575   return -(index + 1) * kPointerSize;
    576 }
    577 
    578 
    579 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
    580   DCHECK(!op->IsRegister());
    581   DCHECK(!op->IsDoubleRegister());
    582   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    583   if (NeedsEagerFrame()) {
    584     return MemOperand(fp, StackSlotOffset(op->index()));
    585   } else {
     586     // Retrieve the parameter relative to the stack pointer, since there is
     587     // no eager stack frame.
    588     return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
    589   }
    590 }
    591 
    592 
    593 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
    594   DCHECK(op->IsDoubleStackSlot());
    595   if (NeedsEagerFrame()) {
    596     return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
    597   } else {
     598     // Retrieve the parameter relative to the stack pointer, since there is
     599     // no eager stack frame.
    600     return MemOperand(
    601         sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
    602   }
    603 }
    604 
    605 
    606 void LCodeGen::WriteTranslation(LEnvironment* environment,
    607                                 Translation* translation) {
    608   if (environment == NULL) return;
    609 
    610   // The translation includes one command per value in the environment.
    611   int translation_size = environment->translation_size();
    612   // The output frame height does not include the parameters.
    613   int height = translation_size - environment->parameter_count();
    614 
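           // The caller's environments are written first, so frames appear in the
           // translation from outermost to innermost.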
    615   WriteTranslation(environment->outer(), translation);
    616   bool has_closure_id = !info()->closure().is_null() &&
    617       !info()->closure().is_identical_to(environment->closure());
    618   int closure_id = has_closure_id
    619       ? DefineDeoptimizationLiteral(environment->closure())
    620       : Translation::kSelfLiteralId;
    621 
    622   switch (environment->frame_type()) {
    623     case JS_FUNCTION:
    624       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
    625       break;
    626     case JS_CONSTRUCT:
    627       translation->BeginConstructStubFrame(closure_id, translation_size);
    628       break;
    629     case JS_GETTER:
    630       DCHECK(translation_size == 1);
    631       DCHECK(height == 0);
    632       translation->BeginGetterStubFrame(closure_id);
    633       break;
    634     case JS_SETTER:
    635       DCHECK(translation_size == 2);
    636       DCHECK(height == 0);
    637       translation->BeginSetterStubFrame(closure_id);
    638       break;
    639     case STUB:
    640       translation->BeginCompiledStubFrame();
    641       break;
    642     case ARGUMENTS_ADAPTOR:
    643       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
    644       break;
    645   }
    646 
    647   int object_index = 0;
    648   int dematerialized_index = 0;
    649   for (int i = 0; i < translation_size; ++i) {
    650     LOperand* value = environment->values()->at(i);
    651     AddToTranslation(environment,
    652                      translation,
    653                      value,
    654                      environment->HasTaggedValueAt(i),
    655                      environment->HasUint32ValueAt(i),
    656                      &object_index,
    657                      &dematerialized_index);
    658   }
    659 }
    660 
    661 
    662 void LCodeGen::AddToTranslation(LEnvironment* environment,
    663                                 Translation* translation,
    664                                 LOperand* op,
    665                                 bool is_tagged,
    666                                 bool is_uint32,
    667                                 int* object_index_pointer,
    668                                 int* dematerialized_index_pointer) {
    669   if (op == LEnvironment::materialization_marker()) {
    670     int object_index = (*object_index_pointer)++;
    671     if (environment->ObjectIsDuplicateAt(object_index)) {
    672       int dupe_of = environment->ObjectDuplicateOfAt(object_index);
    673       translation->DuplicateObject(dupe_of);
    674       return;
    675     }
    676     int object_length = environment->ObjectLengthAt(object_index);
    677     if (environment->ObjectIsArgumentsAt(object_index)) {
    678       translation->BeginArgumentsObject(object_length);
    679     } else {
    680       translation->BeginCapturedObject(object_length);
    681     }
    682     int dematerialized_index = *dematerialized_index_pointer;
    683     int env_offset = environment->translation_size() + dematerialized_index;
    684     *dematerialized_index_pointer += object_length;
    685     for (int i = 0; i < object_length; ++i) {
    686       LOperand* value = environment->values()->at(env_offset + i);
    687       AddToTranslation(environment,
    688                        translation,
    689                        value,
    690                        environment->HasTaggedValueAt(env_offset + i),
    691                        environment->HasUint32ValueAt(env_offset + i),
    692                        object_index_pointer,
    693                        dematerialized_index_pointer);
    694     }
    695     return;
    696   }
    697 
    698   if (op->IsStackSlot()) {
    699     if (is_tagged) {
    700       translation->StoreStackSlot(op->index());
    701     } else if (is_uint32) {
    702       translation->StoreUint32StackSlot(op->index());
    703     } else {
    704       translation->StoreInt32StackSlot(op->index());
    705     }
    706   } else if (op->IsDoubleStackSlot()) {
    707     translation->StoreDoubleStackSlot(op->index());
    708   } else if (op->IsRegister()) {
    709     Register reg = ToRegister(op);
    710     if (is_tagged) {
    711       translation->StoreRegister(reg);
    712     } else if (is_uint32) {
    713       translation->StoreUint32Register(reg);
    714     } else {
    715       translation->StoreInt32Register(reg);
    716     }
    717   } else if (op->IsDoubleRegister()) {
    718     DoubleRegister reg = ToDoubleRegister(op);
    719     translation->StoreDoubleRegister(reg);
    720   } else if (op->IsConstantOperand()) {
    721     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    722     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    723     translation->StoreLiteral(src_index);
    724   } else {
    725     UNREACHABLE();
    726   }
    727 }
    728 
    729 
    730 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
    731   int size = masm()->CallSize(code, mode);
    732   if (code->kind() == Code::BINARY_OP_IC ||
    733       code->kind() == Code::COMPARE_IC) {
    734     size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
    735   }
    736   return size;
    737 }
    738 
    739 
    740 void LCodeGen::CallCode(Handle<Code> code,
    741                         RelocInfo::Mode mode,
    742                         LInstruction* instr,
    743                         TargetAddressStorageMode storage_mode) {
    744   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
    745 }
    746 
    747 
    748 void LCodeGen::CallCodeGeneric(Handle<Code> code,
    749                                RelocInfo::Mode mode,
    750                                LInstruction* instr,
    751                                SafepointMode safepoint_mode,
    752                                TargetAddressStorageMode storage_mode) {
    753   DCHECK(instr != NULL);
    754   // Block literal pool emission to ensure nop indicating no inlined smi code
    755   // is in the correct position.
    756   Assembler::BlockConstPoolScope block_const_pool(masm());
    757   __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
    758   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
    759 
    760   // Signal that we don't inline smi code before these stubs in the
    761   // optimizing code generator.
    762   if (code->kind() == Code::BINARY_OP_IC ||
    763       code->kind() == Code::COMPARE_IC) {
    764     __ nop();
    765   }
    766 }
    767 
    768 
    769 void LCodeGen::CallRuntime(const Runtime::Function* function,
    770                            int num_arguments,
    771                            LInstruction* instr,
    772                            SaveFPRegsMode save_doubles) {
    773   DCHECK(instr != NULL);
    774 
    775   __ CallRuntime(function, num_arguments, save_doubles);
    776 
    777   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    778 }
    779 
    780 
    781 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
    782   if (context->IsRegister()) {
    783     __ Move(cp, ToRegister(context));
    784   } else if (context->IsStackSlot()) {
    785     __ ldr(cp, ToMemOperand(context));
    786   } else if (context->IsConstantOperand()) {
    787     HConstant* constant =
    788         chunk_->LookupConstant(LConstantOperand::cast(context));
    789     __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
    790   } else {
    791     UNREACHABLE();
    792   }
    793 }
    794 
    795 
    796 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
    797                                        int argc,
    798                                        LInstruction* instr,
    799                                        LOperand* context) {
    800   LoadContextFromDeferred(context);
    801   __ CallRuntimeSaveDoubles(id);
    802   RecordSafepointWithRegisters(
    803       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
    804 }
    805 
    806 
    807 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
    808                                                     Safepoint::DeoptMode mode) {
    809   environment->set_has_been_used();
    810   if (!environment->HasBeenRegistered()) {
    811     // Physical stack frame layout:
    812     // -x ............. -4  0 ..................................... y
    813     // [incoming arguments] [spill slots] [pushed outgoing arguments]
    814 
    815     // Layout of the environment:
    816     // 0 ..................................................... size-1
    817     // [parameters] [locals] [expression stack including arguments]
    818 
    819     // Layout of the translation:
    820     // 0 ........................................................ size - 1 + 4
    821     // [expression stack including arguments] [locals] [4 words] [parameters]
    822     // |>------------  translation_size ------------<|
    823 
    824     int frame_count = 0;
    825     int jsframe_count = 0;
    826     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
    827       ++frame_count;
    828       if (e->frame_type() == JS_FUNCTION) {
    829         ++jsframe_count;
    830       }
    831     }
    832     Translation translation(&translations_, frame_count, jsframe_count, zone());
    833     WriteTranslation(environment, &translation);
    834     int deoptimization_index = deoptimizations_.length();
    835     int pc_offset = masm()->pc_offset();
    836     environment->Register(deoptimization_index,
    837                           translation.index(),
    838                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    839     deoptimizations_.Add(environment, zone());
    840   }
    841 }
    842 
    843 
    844 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
    845                             const char* detail,
    846                             Deoptimizer::BailoutType bailout_type) {
    847   LEnvironment* environment = instr->environment();
    848   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
    849   DCHECK(environment->HasBeenRegistered());
    850   int id = environment->deoptimization_index();
    851   DCHECK(info()->IsOptimizing() || info()->IsStub());
    852   Address entry =
    853       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
    854   if (entry == NULL) {
    855     Abort(kBailoutWasNotPrepared);
    856     return;
    857   }
    858 
    859   if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    860     Register scratch = scratch0();
    861     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    862 
    863     // Store the condition on the stack if necessary
    864     if (condition != al) {
    865       __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
    866       __ mov(scratch, Operand(1), LeaveCC, condition);
    867       __ push(scratch);
    868     }
    869 
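             // Decrement the global stress-deopt counter; when it reaches zero (eq)
             // it is reset to FLAG_deopt_every_n_times, and the conditional Call
             // below then actually deoptimizes. r1 is saved around the update since
             // it is used to hold the counter.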
    870     __ push(r1);
    871     __ mov(scratch, Operand(count));
    872     __ ldr(r1, MemOperand(scratch));
    873     __ sub(r1, r1, Operand(1), SetCC);
    874     __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
    875     __ str(r1, MemOperand(scratch));
    876     __ pop(r1);
    877 
    878     if (condition != al) {
    879       // Clean up the stack before the deoptimizer call
    880       __ pop(scratch);
    881     }
    882 
    883     __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
    884 
    885     // 'Restore' the condition in a slightly hacky way. (It would be better
    886     // to use 'msr' and 'mrs' instructions here, but they are not supported by
    887     // our ARM simulator).
    888     if (condition != al) {
    889       condition = ne;
    890       __ cmp(scratch, Operand::Zero());
    891     }
    892   }
    893 
    894   if (info()->ShouldTrapOnDeopt()) {
    895     __ stop("trap_on_deopt", condition);
    896   }
    897 
    898   Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
    899                              instr->Mnemonic(), detail);
    900   DCHECK(info()->IsStub() || frame_is_built_);
     901   // Go through the jump table if we need to handle a condition, build a
     902   // frame, or restore caller doubles.
    903   if (condition == al && frame_is_built_ &&
    904       !info()->saves_caller_doubles()) {
    905     DeoptComment(reason);
    906     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    907   } else {
    908     Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
    909                                             !frame_is_built_);
     910     // We often have several deopts to the same entry; reuse the last
     911     // jump table entry if this is the case.
    912     if (jump_table_.is_empty() ||
    913         !table_entry.IsEquivalentTo(jump_table_.last())) {
    914       jump_table_.Add(table_entry, zone());
    915     }
    916     __ b(condition, &jump_table_.last().label);
    917   }
    918 }
    919 
    920 
    921 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
    922                             const char* detail) {
    923   Deoptimizer::BailoutType bailout_type = info()->IsStub()
    924       ? Deoptimizer::LAZY
    925       : Deoptimizer::EAGER;
    926   DeoptimizeIf(condition, instr, detail, bailout_type);
    927 }
    928 
    929 
    930 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
    931   int length = deoptimizations_.length();
    932   if (length == 0) return;
    933   Handle<DeoptimizationInputData> data =
    934       DeoptimizationInputData::New(isolate(), length, TENURED);
    935 
    936   Handle<ByteArray> translations =
    937       translations_.CreateByteArray(isolate()->factory());
    938   data->SetTranslationByteArray(*translations);
    939   data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
    940   data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
    941   if (info_->IsOptimizing()) {
    942     // Reference to shared function info does not change between phases.
    943     AllowDeferredHandleDereference allow_handle_dereference;
    944     data->SetSharedFunctionInfo(*info_->shared_info());
    945   } else {
    946     data->SetSharedFunctionInfo(Smi::FromInt(0));
    947   }
    948 
    949   Handle<FixedArray> literals =
    950       factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
    951   { AllowDeferredHandleDereference copy_handles;
    952     for (int i = 0; i < deoptimization_literals_.length(); i++) {
    953       literals->set(i, *deoptimization_literals_[i]);
    954     }
    955     data->SetLiteralArray(*literals);
    956   }
    957 
    958   data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
    959   data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
    960 
    961   // Populate the deoptimization entries.
    962   for (int i = 0; i < length; i++) {
    963     LEnvironment* env = deoptimizations_[i];
    964     data->SetAstId(i, env->ast_id());
    965     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    966     data->SetArgumentsStackHeight(i,
    967                                   Smi::FromInt(env->arguments_stack_height()));
    968     data->SetPc(i, Smi::FromInt(env->pc_offset()));
    969   }
    970   code->set_deoptimization_data(*data);
    971 }
    972 
    973 
    974 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
    975   int result = deoptimization_literals_.length();
    976   for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    977     if (deoptimization_literals_[i].is_identical_to(literal)) return i;
    978   }
    979   deoptimization_literals_.Add(literal, zone());
    980   return result;
    981 }
    982 
    983 
    984 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
    985   DCHECK(deoptimization_literals_.length() == 0);
    986 
    987   const ZoneList<Handle<JSFunction> >* inlined_closures =
    988       chunk()->inlined_closures();
    989 
    990   for (int i = 0, length = inlined_closures->length();
    991        i < length;
    992        i++) {
    993     DefineDeoptimizationLiteral(inlined_closures->at(i));
    994   }
    995 
    996   inlined_function_count_ = deoptimization_literals_.length();
    997 }
    998 
    999 
   1000 void LCodeGen::RecordSafepointWithLazyDeopt(
   1001     LInstruction* instr, SafepointMode safepoint_mode) {
   1002   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
   1003     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   1004   } else {
   1005     DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   1006     RecordSafepointWithRegisters(
   1007         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   1008   }
   1009 }
   1010 
   1011 
   1012 void LCodeGen::RecordSafepoint(
   1013     LPointerMap* pointers,
   1014     Safepoint::Kind kind,
   1015     int arguments,
   1016     Safepoint::DeoptMode deopt_mode) {
   1017   DCHECK(expected_safepoint_kind_ == kind);
   1018 
   1019   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   1020   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
   1021       kind, arguments, deopt_mode);
   1022   for (int i = 0; i < operands->length(); i++) {
   1023     LOperand* pointer = operands->at(i);
   1024     if (pointer->IsStackSlot()) {
   1025       safepoint.DefinePointerSlot(pointer->index(), zone());
   1026     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
   1027       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
   1028     }
   1029   }
   1030   if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
   1031     // Register pp always contains a pointer to the constant pool.
   1032     safepoint.DefinePointerRegister(pp, zone());
   1033   }
   1034 }
   1035 
   1036 
   1037 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
   1038                                Safepoint::DeoptMode deopt_mode) {
   1039   RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
   1040 }
   1041 
   1042 
   1043 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
   1044   LPointerMap empty_pointers(zone());
   1045   RecordSafepoint(&empty_pointers, deopt_mode);
   1046 }
   1047 
   1048 
   1049 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
   1050                                             int arguments,
   1051                                             Safepoint::DeoptMode deopt_mode) {
   1052   RecordSafepoint(
   1053       pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
   1054 }
   1055 
   1056 
   1057 void LCodeGen::RecordAndWritePosition(int position) {
   1058   if (position == RelocInfo::kNoPosition) return;
   1059   masm()->positions_recorder()->RecordPosition(position);
   1060   masm()->positions_recorder()->WriteRecordedPositions();
   1061 }
   1062 
   1063 
   1064 static const char* LabelType(LLabel* label) {
   1065   if (label->is_loop_header()) return " (loop header)";
   1066   if (label->is_osr_entry()) return " (OSR entry)";
   1067   return "";
   1068 }
   1069 
   1070 
   1071 void LCodeGen::DoLabel(LLabel* label) {
   1072   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
   1073           current_instruction_,
   1074           label->hydrogen_value()->id(),
   1075           label->block_id(),
   1076           LabelType(label));
   1077   __ bind(label->label());
   1078   current_block_ = label->block_id();
   1079   DoGap(label);
   1080 }
   1081 
   1082 
   1083 void LCodeGen::DoParallelMove(LParallelMove* move) {
   1084   resolver_.Resolve(move);
   1085 }
   1086 
   1087 
   1088 void LCodeGen::DoGap(LGap* gap) {
   1089   for (int i = LGap::FIRST_INNER_POSITION;
   1090        i <= LGap::LAST_INNER_POSITION;
   1091        i++) {
   1092     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
   1093     LParallelMove* move = gap->GetParallelMove(inner_pos);
   1094     if (move != NULL) DoParallelMove(move);
   1095   }
   1096 }
   1097 
   1098 
   1099 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
   1100   DoGap(instr);
   1101 }
   1102 
   1103 
   1104 void LCodeGen::DoParameter(LParameter* instr) {
   1105   // Nothing to do.
   1106 }
   1107 
   1108 
   1109 void LCodeGen::DoCallStub(LCallStub* instr) {
   1110   DCHECK(ToRegister(instr->context()).is(cp));
   1111   DCHECK(ToRegister(instr->result()).is(r0));
   1112   switch (instr->hydrogen()->major_key()) {
   1113     case CodeStub::RegExpExec: {
   1114       RegExpExecStub stub(isolate());
   1115       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1116       break;
   1117     }
   1118     case CodeStub::SubString: {
   1119       SubStringStub stub(isolate());
   1120       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1121       break;
   1122     }
   1123     case CodeStub::StringCompare: {
   1124       StringCompareStub stub(isolate());
   1125       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1126       break;
   1127     }
   1128     default:
   1129       UNREACHABLE();
   1130   }
   1131 }
   1132 
   1133 
   1134 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   1135   GenerateOsrPrologue();
   1136 }
   1137 
   1138 
   1139 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   1140   Register dividend = ToRegister(instr->dividend());
   1141   int32_t divisor = instr->divisor();
   1142   DCHECK(dividend.is(ToRegister(instr->result())));
   1143 
   1144   // Theoretically, a variation of the branch-free code for integer division by
   1145   // a power of 2 (calculating the remainder via an additional multiplication
   1146   // (which gets simplified to an 'and') and subtraction) should be faster, and
   1147   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
   1148   // indicate that positive dividends are heavily favored, so the branching
   1149   // version performs better.
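           // For example, with divisor 8 the mask is 7: 13 % 8 is 13 & 7 = 5, while
           // -13 % 8 is computed as -((13) & 7) = -5 by the negate/and/negate
           // sequence below, matching JS semantics where the result takes the sign
           // of the dividend.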
   1150   HMod* hmod = instr->hydrogen();
   1151   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   1152   Label dividend_is_not_negative, done;
   1153   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
   1154     __ cmp(dividend, Operand::Zero());
   1155     __ b(pl, &dividend_is_not_negative);
   1156     // Note that this is correct even for kMinInt operands.
   1157     __ rsb(dividend, dividend, Operand::Zero());
   1158     __ and_(dividend, dividend, Operand(mask));
   1159     __ rsb(dividend, dividend, Operand::Zero(), SetCC);
   1160     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1161       DeoptimizeIf(eq, instr);
   1162     }
   1163     __ b(&done);
   1164   }
   1165 
   1166   __ bind(&dividend_is_not_negative);
   1167   __ and_(dividend, dividend, Operand(mask));
   1168   __ bind(&done);
   1169 }
   1170 
   1171 
   1172 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   1173   Register dividend = ToRegister(instr->dividend());
   1174   int32_t divisor = instr->divisor();
   1175   Register result = ToRegister(instr->result());
   1176   DCHECK(!dividend.is(result));
   1177 
   1178   if (divisor == 0) {
   1179     DeoptimizeIf(al, instr);
   1180     return;
   1181   }
   1182 
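           // Compute dividend - Abs(divisor) * truncate(dividend / Abs(divisor));
           // the remainder's sign then follows the dividend, as JS requires.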
   1183   __ TruncatingDiv(result, dividend, Abs(divisor));
   1184   __ mov(ip, Operand(Abs(divisor)));
   1185   __ smull(result, ip, result, ip);
   1186   __ sub(result, dividend, result, SetCC);
   1187 
   1188   // Check for negative zero.
   1189   HMod* hmod = instr->hydrogen();
   1190   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1191     Label remainder_not_zero;
   1192     __ b(ne, &remainder_not_zero);
   1193     __ cmp(dividend, Operand::Zero());
   1194     DeoptimizeIf(lt, instr);
   1195     __ bind(&remainder_not_zero);
   1196   }
   1197 }
   1198 
   1199 
   1200 void LCodeGen::DoModI(LModI* instr) {
   1201   HMod* hmod = instr->hydrogen();
   1202   if (CpuFeatures::IsSupported(SUDIV)) {
   1203     CpuFeatureScope scope(masm(), SUDIV);
   1204 
   1205     Register left_reg = ToRegister(instr->left());
   1206     Register right_reg = ToRegister(instr->right());
   1207     Register result_reg = ToRegister(instr->result());
   1208 
   1209     Label done;
    1210     // Check for x % 0; sdiv might signal an exception. We have to deopt in
    1211     // this case because we can't return a NaN.
   1212     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
   1213       __ cmp(right_reg, Operand::Zero());
   1214       DeoptimizeIf(eq, instr);
   1215     }
   1216 
    1217     // Check for kMinInt % -1; sdiv will return kMinInt, which is not what we
    1218     // want. We have to deopt if we care about -0, because we can't return that.
   1219     if (hmod->CheckFlag(HValue::kCanOverflow)) {
   1220       Label no_overflow_possible;
   1221       __ cmp(left_reg, Operand(kMinInt));
   1222       __ b(ne, &no_overflow_possible);
   1223       __ cmp(right_reg, Operand(-1));
   1224       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1225         DeoptimizeIf(eq, instr);
   1226       } else {
   1227         __ b(ne, &no_overflow_possible);
   1228         __ mov(result_reg, Operand::Zero());
   1229         __ jmp(&done);
   1230       }
   1231       __ bind(&no_overflow_possible);
   1232     }
   1233 
   1234     // For 'r3 = r1 % r2' we can have the following ARM code:
   1235     //   sdiv r3, r1, r2
   1236     //   mls r3, r3, r2, r1
   1237 
   1238     __ sdiv(result_reg, left_reg, right_reg);
   1239     __ Mls(result_reg, result_reg, right_reg, left_reg);
   1240 
   1241     // If we care about -0, test if the dividend is <0 and the result is 0.
   1242     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1243       __ cmp(result_reg, Operand::Zero());
   1244       __ b(ne, &done);
   1245       __ cmp(left_reg, Operand::Zero());
   1246       DeoptimizeIf(lt, instr);
   1247     }
   1248     __ bind(&done);
   1249 
   1250   } else {
   1251     // General case, without any SDIV support.
   1252     Register left_reg = ToRegister(instr->left());
   1253     Register right_reg = ToRegister(instr->right());
   1254     Register result_reg = ToRegister(instr->result());
   1255     Register scratch = scratch0();
   1256     DCHECK(!scratch.is(left_reg));
   1257     DCHECK(!scratch.is(right_reg));
   1258     DCHECK(!scratch.is(result_reg));
   1259     DwVfpRegister dividend = ToDoubleRegister(instr->temp());
   1260     DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
   1261     DCHECK(!divisor.is(dividend));
   1262     LowDwVfpRegister quotient = double_scratch0();
   1263     DCHECK(!quotient.is(dividend));
   1264     DCHECK(!quotient.is(divisor));
   1265 
   1266     Label done;
    1267     // Check for x % 0; we have to deopt in this case because we can't return
    1268     // a NaN.
   1269     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
   1270       __ cmp(right_reg, Operand::Zero());
   1271       DeoptimizeIf(eq, instr);
   1272     }
   1273 
   1274     __ Move(result_reg, left_reg);
   1275     // Load the arguments in VFP registers. The divisor value is preloaded
   1276     // before. Be careful that 'right_reg' is only live on entry.
    1277     // TODO(svenpanne) The last comment seems to be wrong nowadays.
   1278     __ vmov(double_scratch0().low(), left_reg);
   1279     __ vcvt_f64_s32(dividend, double_scratch0().low());
   1280     __ vmov(double_scratch0().low(), right_reg);
   1281     __ vcvt_f64_s32(divisor, double_scratch0().low());
   1282 
   1283     // We do not care about the sign of the divisor. Note that we still handle
   1284     // the kMinInt % -1 case correctly, though.
   1285     __ vabs(divisor, divisor);
   1286     // Compute the quotient and round it to a 32bit integer.
   1287     __ vdiv(quotient, dividend, divisor);
   1288     __ vcvt_s32_f64(quotient.low(), quotient);
   1289     __ vcvt_f64_s32(quotient, quotient.low());
   1290 
   1291     // Compute the remainder in result.
   1292     __ vmul(double_scratch0(), divisor, quotient);
   1293     __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
   1294     __ vmov(scratch, double_scratch0().low());
   1295     __ sub(result_reg, left_reg, scratch, SetCC);
   1296 
   1297     // If we care about -0, test if the dividend is <0 and the result is 0.
   1298     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1299       __ b(ne, &done);
   1300       __ cmp(left_reg, Operand::Zero());
   1301       DeoptimizeIf(mi, instr);
   1302     }
   1303     __ bind(&done);
   1304   }
   1305 }
   1306 
   1307 
   1308 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   1309   Register dividend = ToRegister(instr->dividend());
   1310   int32_t divisor = instr->divisor();
   1311   Register result = ToRegister(instr->result());
   1312   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
   1313   DCHECK(!result.is(dividend));
   1314 
   1315   // Check for (0 / -x) that will produce negative zero.
   1316   HDiv* hdiv = instr->hydrogen();
   1317   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1318     __ cmp(dividend, Operand::Zero());
   1319     DeoptimizeIf(eq, instr);
   1320   }
   1321   // Check for (kMinInt / -1).
   1322   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
   1323     __ cmp(dividend, Operand(kMinInt));
   1324     DeoptimizeIf(eq, instr);
   1325   }
   1326   // Deoptimize if remainder will not be 0.
   1327   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
   1328       divisor != 1 && divisor != -1) {
   1329     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   1330     __ tst(dividend, Operand(mask));
   1331     DeoptimizeIf(ne, instr);
   1332   }
   1333 
   1334   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
   1335     __ rsb(result, dividend, Operand(0));
   1336     return;
   1337   }
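           // An arithmetic shift alone would round towards -infinity; adding
           // 2^shift - 1 to negative dividends first (via the LSR of the sign bits)
           // makes the result round towards zero. E.g. for divisor 4 this emits
           // roughly:
           //   mov result, dividend, ASR #31
           //   add result, dividend, result, LSR #30
           //   mov result, result, ASR #2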
   1338   int32_t shift = WhichPowerOf2Abs(divisor);
   1339   if (shift == 0) {
   1340     __ mov(result, dividend);
   1341   } else if (shift == 1) {
   1342     __ add(result, dividend, Operand(dividend, LSR, 31));
   1343   } else {
   1344     __ mov(result, Operand(dividend, ASR, 31));
   1345     __ add(result, dividend, Operand(result, LSR, 32 - shift));
   1346   }
   1347   if (shift > 0) __ mov(result, Operand(result, ASR, shift));
   1348   if (divisor < 0) __ rsb(result, result, Operand(0));
   1349 }
   1350 
   1351 
   1352 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   1353   Register dividend = ToRegister(instr->dividend());
   1354   int32_t divisor = instr->divisor();
   1355   Register result = ToRegister(instr->result());
   1356   DCHECK(!dividend.is(result));
   1357 
   1358   if (divisor == 0) {
   1359     DeoptimizeIf(al, instr);
   1360     return;
   1361   }
   1362 
   1363   // Check for (0 / -x) that will produce negative zero.
   1364   HDiv* hdiv = instr->hydrogen();
   1365   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1366     __ cmp(dividend, Operand::Zero());
   1367     DeoptimizeIf(eq, instr);
   1368   }
   1369 
   1370   __ TruncatingDiv(result, dividend, Abs(divisor));
   1371   if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1372 
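           // If not all uses truncate the result, the division must be exact:
           // recompute result * divisor and deopt when it differs from the dividend.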
   1373   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
   1374     __ mov(ip, Operand(divisor));
   1375     __ smull(scratch0(), ip, result, ip);
   1376     __ sub(scratch0(), scratch0(), dividend, SetCC);
   1377     DeoptimizeIf(ne, instr);
   1378   }
   1379 }
   1380 
   1381 
   1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
   1383 void LCodeGen::DoDivI(LDivI* instr) {
   1384   HBinaryOperation* hdiv = instr->hydrogen();
   1385   Register dividend = ToRegister(instr->dividend());
   1386   Register divisor = ToRegister(instr->divisor());
   1387   Register result = ToRegister(instr->result());
   1388 
   1389   // Check for x / 0.
   1390   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1391     __ cmp(divisor, Operand::Zero());
   1392     DeoptimizeIf(eq, instr);
   1393   }
   1394 
   1395   // Check for (0 / -x) that will produce negative zero.
   1396   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1397     Label positive;
   1398     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    1399       // Do the test only if it hasn't been done above.

   1400       __ cmp(divisor, Operand::Zero());
   1401     }
   1402     __ b(pl, &positive);
   1403     __ cmp(dividend, Operand::Zero());
   1404     DeoptimizeIf(eq, instr);
   1405     __ bind(&positive);
   1406   }
   1407 
   1408   // Check for (kMinInt / -1).
   1409   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
   1410       (!CpuFeatures::IsSupported(SUDIV) ||
   1411        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
   1412     // We don't need to check for overflow when truncating with sdiv
   1413     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
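             // The second cmp below is predicated on eq, so the final eq holds
             // only when dividend == kMinInt and divisor == -1.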
   1414     __ cmp(dividend, Operand(kMinInt));
   1415     __ cmp(divisor, Operand(-1), eq);
   1416     DeoptimizeIf(eq, instr);
   1417   }
   1418 
   1419   if (CpuFeatures::IsSupported(SUDIV)) {
   1420     CpuFeatureScope scope(masm(), SUDIV);
   1421     __ sdiv(result, dividend, divisor);
   1422   } else {
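             // No sdiv available: divide in VFP double precision and truncate.
             // Both 32-bit operands are exactly representable in a double, so
             // truncating the rounded quotient gives the integer quotient.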
   1423     DoubleRegister vleft = ToDoubleRegister(instr->temp());
   1424     DoubleRegister vright = double_scratch0();
   1425     __ vmov(double_scratch0().low(), dividend);
   1426     __ vcvt_f64_s32(vleft, double_scratch0().low());
   1427     __ vmov(double_scratch0().low(), divisor);
   1428     __ vcvt_f64_s32(vright, double_scratch0().low());
   1429     __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
   1430     __ vcvt_s32_f64(double_scratch0().low(), vleft);
   1431     __ vmov(result, double_scratch0().low());
   1432   }
   1433 
   1434   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   1435     // Compute remainder and deopt if it's not zero.
   1436     Register remainder = scratch0();
   1437     __ Mls(remainder, result, divisor, dividend);
   1438     __ cmp(remainder, Operand::Zero());
   1439     DeoptimizeIf(ne, instr);
   1440   }
   1441 }
   1442 
   1443 
   1444 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
   1445   DwVfpRegister addend = ToDoubleRegister(instr->addend());
   1446   DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
   1447   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
   1448 
   1449   // This is computed in-place.
   1450   DCHECK(addend.is(ToDoubleRegister(instr->result())));
   1451 
   1452   __ vmla(addend, multiplier, multiplicand);
   1453 }
   1454 
   1455 
   1456 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
   1457   DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
   1458   DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
   1459   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
   1460 
   1461   // This is computed in-place.
   1462   DCHECK(minuend.is(ToDoubleRegister(instr->result())));
   1463 
   1464   __ vmls(minuend, multiplier, multiplicand);
   1465 }
   1466 
   1467 
   1468 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   1469   Register dividend = ToRegister(instr->dividend());
   1470   Register result = ToRegister(instr->result());
   1471   int32_t divisor = instr->divisor();
   1472 
   1473   // If the divisor is 1, return the dividend.
   1474   if (divisor == 1) {
   1475     __ Move(result, dividend);
   1476     return;
   1477   }
   1478 
   1479   // If the divisor is positive, things are easy: There can be no deopts and we
   1480   // can simply do an arithmetic right shift.
   1481   int32_t shift = WhichPowerOf2Abs(divisor);
   1482   if (divisor > 1) {
   1483     __ mov(result, Operand(dividend, ASR, shift));
   1484     return;
   1485   }
   1486 
   1487   // If the divisor is negative, we have to negate and handle edge cases.
   1488   __ rsb(result, dividend, Operand::Zero(), SetCC);
   1489   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1490     DeoptimizeIf(eq, instr);
   1491   }
   1492 
   1493   // Dividing by -1 is basically negation, unless we overflow.
   1494   if (divisor == -1) {
   1495     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1496       DeoptimizeIf(vs, instr);
   1497     }
   1498     return;
   1499   }
   1500 
   1501   // If the negation could not overflow, simply shifting is OK.
   1502   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1503     __ mov(result, Operand(result, ASR, shift));
   1504     return;
   1505   }
   1506 
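           // The rsb above set the flags: if negating the dividend overflowed
           // (dividend was kMinInt), the quotient is the statically known
           // kMinInt / divisor; otherwise the negated value can be shifted.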
   1507   __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
   1508   __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
   1509 }
   1510 
   1511 
   1512 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   1513   Register dividend = ToRegister(instr->dividend());
   1514   int32_t divisor = instr->divisor();
   1515   Register result = ToRegister(instr->result());
   1516   DCHECK(!dividend.is(result));
   1517 
   1518   if (divisor == 0) {
   1519     DeoptimizeIf(al, instr);
   1520     return;
   1521   }
   1522 
   1523   // Check for (0 / -x) that will produce negative zero.
   1524   HMathFloorOfDiv* hdiv = instr->hydrogen();
   1525   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1526     __ cmp(dividend, Operand::Zero());
   1527     DeoptimizeIf(eq, instr);
   1528   }
   1529 
   1530   // Easy case: We need no dynamic check for the dividend and the flooring
   1531   // division is the same as the truncating division.
   1532   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
   1533       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
   1534     __ TruncatingDiv(result, dividend, Abs(divisor));
   1535     if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1536     return;
   1537   }
   1538 
   1539   // In the general case we may need to adjust before and after the truncating
   1540   // division to get a flooring division.
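           // When dividend and divisor have opposite signs, floor(a / b) equals
           // trunc((a + (b > 0 ? 1 : -1)) / b) - 1, e.g. -3 / 2:
           // trunc((-3 + 1) / 2) - 1 == -2.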
   1541   Register temp = ToRegister(instr->temp());
   1542   DCHECK(!temp.is(dividend) && !temp.is(result));
   1543   Label needs_adjustment, done;
   1544   __ cmp(dividend, Operand::Zero());
   1545   __ b(divisor > 0 ? lt : gt, &needs_adjustment);
   1546   __ TruncatingDiv(result, dividend, Abs(divisor));
   1547   if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1548   __ jmp(&done);
   1549   __ bind(&needs_adjustment);
   1550   __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
   1551   __ TruncatingDiv(result, temp, Abs(divisor));
   1552   if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1553   __ sub(result, result, Operand(1));
   1554   __ bind(&done);
   1555 }
   1556 
   1557 
   1558 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
   1559 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   1560   HBinaryOperation* hdiv = instr->hydrogen();
   1561   Register left = ToRegister(instr->dividend());
   1562   Register right = ToRegister(instr->divisor());
   1563   Register result = ToRegister(instr->result());
   1564 
   1565   // Check for x / 0.
   1566   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1567     __ cmp(right, Operand::Zero());
   1568     DeoptimizeIf(eq, instr);
   1569   }
   1570 
   1571   // Check for (0 / -x) that will produce negative zero.
   1572   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1573     Label positive;
   1574     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    1575       // Do the test only if it hasn't been done above.
   1576       __ cmp(right, Operand::Zero());
   1577     }
   1578     __ b(pl, &positive);
   1579     __ cmp(left, Operand::Zero());
   1580     DeoptimizeIf(eq, instr);
   1581     __ bind(&positive);
   1582   }
   1583 
   1584   // Check for (kMinInt / -1).
   1585   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
   1586       (!CpuFeatures::IsSupported(SUDIV) ||
   1587        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
   1588     // We don't need to check for overflow when truncating with sdiv
   1589     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
   1590     __ cmp(left, Operand(kMinInt));
   1591     __ cmp(right, Operand(-1), eq);
   1592     DeoptimizeIf(eq, instr);
   1593   }
   1594 
   1595   if (CpuFeatures::IsSupported(SUDIV)) {
   1596     CpuFeatureScope scope(masm(), SUDIV);
   1597     __ sdiv(result, left, right);
   1598   } else {
   1599     DoubleRegister vleft = ToDoubleRegister(instr->temp());
   1600     DoubleRegister vright = double_scratch0();
   1601     __ vmov(double_scratch0().low(), left);
   1602     __ vcvt_f64_s32(vleft, double_scratch0().low());
   1603     __ vmov(double_scratch0().low(), right);
   1604     __ vcvt_f64_s32(vright, double_scratch0().low());
   1605     __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
   1606     __ vcvt_s32_f64(double_scratch0().low(), vleft);
   1607     __ vmov(result, double_scratch0().low());
   1608   }
   1609 
   1610   Label done;
   1611   Register remainder = scratch0();
   1612   __ Mls(remainder, result, right, left);
   1613   __ cmp(remainder, Operand::Zero());
   1614   __ b(eq, &done);
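           // The sdiv/vdiv result was truncated toward zero. If the nonzero
           // remainder and the divisor have opposite signs, the floored quotient
           // is one less; eor puts that sign test into bit 31 and ASR #31 turns
           // it into 0 or -1.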
   1615   __ eor(remainder, remainder, Operand(right));
   1616   __ add(result, result, Operand(remainder, ASR, 31));
   1617   __ bind(&done);
   1618 }
   1619 
   1620 
   1621 void LCodeGen::DoMulI(LMulI* instr) {
   1622   Register result = ToRegister(instr->result());
   1623   // Note that result may alias left.
   1624   Register left = ToRegister(instr->left());
   1625   LOperand* right_op = instr->right();
   1626 
   1627   bool bailout_on_minus_zero =
   1628     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   1629   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1630 
   1631   if (right_op->IsConstantOperand()) {
   1632     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
   1633 
   1634     if (bailout_on_minus_zero && (constant < 0)) {
    1635       // The case of a zero constant will be handled separately.
    1636       // If the constant is negative and left is zero, the result should be -0.
   1637       __ cmp(left, Operand::Zero());
   1638       DeoptimizeIf(eq, instr);
   1639     }
   1640 
   1641     switch (constant) {
   1642       case -1:
   1643         if (overflow) {
   1644           __ rsb(result, left, Operand::Zero(), SetCC);
   1645           DeoptimizeIf(vs, instr);
   1646         } else {
   1647           __ rsb(result, left, Operand::Zero());
   1648         }
   1649         break;
   1650       case 0:
   1651         if (bailout_on_minus_zero) {
    1652           // If left is strictly negative and the constant is zero, the
    1653           // result is -0. Deoptimize if required, otherwise return 0.
   1654           __ cmp(left, Operand::Zero());
   1655           DeoptimizeIf(mi, instr);
   1656         }
   1657         __ mov(result, Operand::Zero());
   1658         break;
   1659       case 1:
   1660         __ Move(result, left);
   1661         break;
   1662       default:
   1663         // Multiplying by powers of two and powers of two plus or minus
   1664         // one can be done faster with shifted operands.
   1665         // For other constants we emit standard code.
   1666         int32_t mask = constant >> 31;
   1667         uint32_t constant_abs = (constant + mask) ^ mask;
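                 // mask is 0 for non-negative constants and -1 for negative ones,
                 // so (constant + mask) ^ mask is |constant| without a branch.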
   1668 
   1669         if (base::bits::IsPowerOfTwo32(constant_abs)) {
   1670           int32_t shift = WhichPowerOf2(constant_abs);
   1671           __ mov(result, Operand(left, LSL, shift));
    1672           // Correct the sign of the result if the constant is negative.
   1673           if (constant < 0)  __ rsb(result, result, Operand::Zero());
   1674         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
   1675           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1676           __ add(result, left, Operand(left, LSL, shift));
    1677           // Correct the sign of the result if the constant is negative.
   1678           if (constant < 0)  __ rsb(result, result, Operand::Zero());
   1679         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
   1680           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1681           __ rsb(result, left, Operand(left, LSL, shift));
    1682           // Correct the sign of the result if the constant is negative.
   1683           if (constant < 0)  __ rsb(result, result, Operand::Zero());
   1684         } else {
   1685           // Generate standard code.
   1686           __ mov(ip, Operand(constant));
   1687           __ mul(result, left, ip);
   1688         }
   1689     }
   1690 
   1691   } else {
   1692     DCHECK(right_op->IsRegister());
   1693     Register right = ToRegister(right_op);
   1694 
   1695     if (overflow) {
   1696       Register scratch = scratch0();
   1697       // scratch:result = left * right.
   1698       if (instr->hydrogen()->representation().IsSmi()) {
   1699         __ SmiUntag(result, left);
   1700         __ smull(result, scratch, result, right);
   1701       } else {
   1702         __ smull(result, scratch, left, right);
   1703       }
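               // The product fits in 32 bits iff the high word (scratch) is the
               // sign extension of the low word (result).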
   1704       __ cmp(scratch, Operand(result, ASR, 31));
   1705       DeoptimizeIf(ne, instr);
   1706     } else {
   1707       if (instr->hydrogen()->representation().IsSmi()) {
   1708         __ SmiUntag(result, left);
   1709         __ mul(result, result, right);
   1710       } else {
   1711         __ mul(result, left, right);
   1712       }
   1713     }
   1714 
   1715     if (bailout_on_minus_zero) {
   1716       Label done;
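               // teq sets the flags from left ^ right; if the sign bits agree
               // (pl), the product cannot be -0, so the zero check is skipped.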
   1717       __ teq(left, Operand(right));
   1718       __ b(pl, &done);
   1719       // Bail out if the result is minus zero.
   1720       __ cmp(result, Operand::Zero());
   1721       DeoptimizeIf(eq, instr);
   1722       __ bind(&done);
   1723     }
   1724   }
   1725 }
   1726 
   1727 
   1728 void LCodeGen::DoBitI(LBitI* instr) {
   1729   LOperand* left_op = instr->left();
   1730   LOperand* right_op = instr->right();
   1731   DCHECK(left_op->IsRegister());
   1732   Register left = ToRegister(left_op);
   1733   Register result = ToRegister(instr->result());
   1734   Operand right(no_reg);
   1735 
   1736   if (right_op->IsStackSlot()) {
   1737     right = Operand(EmitLoadRegister(right_op, ip));
   1738   } else {
   1739     DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
   1740     right = ToOperand(right_op);
   1741   }
   1742 
   1743   switch (instr->op()) {
   1744     case Token::BIT_AND:
   1745       __ and_(result, left, right);
   1746       break;
   1747     case Token::BIT_OR:
   1748       __ orr(result, left, right);
   1749       break;
   1750     case Token::BIT_XOR:
   1751       if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
   1752         __ mvn(result, Operand(left));
   1753       } else {
   1754         __ eor(result, left, right);
   1755       }
   1756       break;
   1757     default:
   1758       UNREACHABLE();
   1759       break;
   1760   }
   1761 }
   1762 
   1763 
   1764 void LCodeGen::DoShiftI(LShiftI* instr) {
   1765   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
   1766   // result may alias either of them.
   1767   LOperand* right_op = instr->right();
   1768   Register left = ToRegister(instr->left());
   1769   Register result = ToRegister(instr->result());
   1770   Register scratch = scratch0();
   1771   if (right_op->IsRegister()) {
   1772     // Mask the right_op operand.
   1773     __ and_(scratch, ToRegister(right_op), Operand(0x1F));
   1774     switch (instr->op()) {
   1775       case Token::ROR:
   1776         __ mov(result, Operand(left, ROR, scratch));
   1777         break;
   1778       case Token::SAR:
   1779         __ mov(result, Operand(left, ASR, scratch));
   1780         break;
   1781       case Token::SHR:
   1782         if (instr->can_deopt()) {
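                   // Only a shift count of 0 can leave bit 31 set; such a result
                   // does not fit in a signed 32-bit integer, so deopt on mi.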
   1783           __ mov(result, Operand(left, LSR, scratch), SetCC);
   1784           DeoptimizeIf(mi, instr);
   1785         } else {
   1786           __ mov(result, Operand(left, LSR, scratch));
   1787         }
   1788         break;
   1789       case Token::SHL:
   1790         __ mov(result, Operand(left, LSL, scratch));
   1791         break;
   1792       default:
   1793         UNREACHABLE();
   1794         break;
   1795     }
   1796   } else {
   1797     // Mask the right_op operand.
   1798     int value = ToInteger32(LConstantOperand::cast(right_op));
   1799     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
   1800     switch (instr->op()) {
   1801       case Token::ROR:
    1802         if (shift_count != 0) {
   1803           __ mov(result, Operand(left, ROR, shift_count));
   1804         } else {
   1805           __ Move(result, left);
   1806         }
   1807         break;
   1808       case Token::SAR:
   1809         if (shift_count != 0) {
   1810           __ mov(result, Operand(left, ASR, shift_count));
   1811         } else {
   1812           __ Move(result, left);
   1813         }
   1814         break;
   1815       case Token::SHR:
   1816         if (shift_count != 0) {
   1817           __ mov(result, Operand(left, LSR, shift_count));
   1818         } else {
   1819           if (instr->can_deopt()) {
   1820             __ tst(left, Operand(0x80000000));
   1821             DeoptimizeIf(ne, instr);
   1822           }
   1823           __ Move(result, left);
   1824         }
   1825         break;
   1826       case Token::SHL:
   1827         if (shift_count != 0) {
   1828           if (instr->hydrogen_value()->representation().IsSmi() &&
   1829               instr->can_deopt()) {
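                     // Shift by one less than requested, then let SmiTag perform
                     // the final doubling with SetCC so a lost sign bit shows up
                     // as overflow (vs).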
   1830             if (shift_count != 1) {
   1831               __ mov(result, Operand(left, LSL, shift_count - 1));
   1832               __ SmiTag(result, result, SetCC);
   1833             } else {
   1834               __ SmiTag(result, left, SetCC);
   1835             }
   1836             DeoptimizeIf(vs, instr);
   1837           } else {
   1838             __ mov(result, Operand(left, LSL, shift_count));
   1839           }
   1840         } else {
   1841           __ Move(result, left);
   1842         }
   1843         break;
   1844       default:
   1845         UNREACHABLE();
   1846         break;
   1847     }
   1848   }
   1849 }
   1850 
   1851 
   1852 void LCodeGen::DoSubI(LSubI* instr) {
   1853   LOperand* left = instr->left();
   1854   LOperand* right = instr->right();
   1855   LOperand* result = instr->result();
   1856   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1857   SBit set_cond = can_overflow ? SetCC : LeaveCC;
   1858 
   1859   if (right->IsStackSlot()) {
   1860     Register right_reg = EmitLoadRegister(right, ip);
   1861     __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   1862   } else {
   1863     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1864     __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   1865   }
   1866 
   1867   if (can_overflow) {
   1868     DeoptimizeIf(vs, instr);
   1869   }
   1870 }
   1871 
   1872 
   1873 void LCodeGen::DoRSubI(LRSubI* instr) {
   1874   LOperand* left = instr->left();
   1875   LOperand* right = instr->right();
   1876   LOperand* result = instr->result();
   1877   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1878   SBit set_cond = can_overflow ? SetCC : LeaveCC;
   1879 
   1880   if (right->IsStackSlot()) {
   1881     Register right_reg = EmitLoadRegister(right, ip);
   1882     __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   1883   } else {
   1884     DCHECK(right->IsRegister() || right->IsConstantOperand());
   1885     __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   1886   }
   1887 
   1888   if (can_overflow) {
   1889     DeoptimizeIf(vs, instr);
   1890   }
   1891 }
   1892 
   1893 
   1894 void LCodeGen::DoConstantI(LConstantI* instr) {
   1895   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1896 }
   1897 
   1898 
   1899 void LCodeGen::DoConstantS(LConstantS* instr) {
   1900   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1901 }
   1902 
   1903 
   1904 void LCodeGen::DoConstantD(LConstantD* instr) {
   1905   DCHECK(instr->result()->IsDoubleRegister());
   1906   DwVfpRegister result = ToDoubleRegister(instr->result());
   1907   double v = instr->value();
   1908   __ Vmov(result, v, scratch0());
   1909 }
   1910 
   1911 
   1912 void LCodeGen::DoConstantE(LConstantE* instr) {
   1913   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1914 }
   1915 
   1916 
   1917 void LCodeGen::DoConstantT(LConstantT* instr) {
   1918   Handle<Object> object = instr->value(isolate());
   1919   AllowDeferredHandleDereference smi_check;
   1920   __ Move(ToRegister(instr->result()), object);
   1921 }
   1922 
   1923 
   1924 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
   1925   Register result = ToRegister(instr->result());
   1926   Register map = ToRegister(instr->value());
   1927   __ EnumLength(result, map);
   1928 }
   1929 
   1930 
   1931 void LCodeGen::DoDateField(LDateField* instr) {
   1932   Register object = ToRegister(instr->date());
   1933   Register result = ToRegister(instr->result());
   1934   Register scratch = ToRegister(instr->temp());
   1935   Smi* index = instr->index();
   1936   Label runtime, done;
   1937   DCHECK(object.is(result));
   1938   DCHECK(object.is(r0));
   1939   DCHECK(!scratch.is(scratch0()));
   1940   DCHECK(!scratch.is(object));
   1941 
   1942   __ SmiTst(object);
   1943   DeoptimizeIf(eq, instr);
   1944   __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
   1945   DeoptimizeIf(ne, instr);
   1946 
   1947   if (index->value() == 0) {
   1948     __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
   1949   } else {
   1950     if (index->value() < JSDate::kFirstUncachedField) {
   1951       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
   1952       __ mov(scratch, Operand(stamp));
   1953       __ ldr(scratch, MemOperand(scratch));
   1954       __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
   1955       __ cmp(scratch, scratch0());
   1956       __ b(ne, &runtime);
   1957       __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
   1958                                              kPointerSize * index->value()));
   1959       __ jmp(&done);
   1960     }
   1961     __ bind(&runtime);
   1962     __ PrepareCallCFunction(2, scratch);
   1963     __ mov(r1, Operand(index));
   1964     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
   1965     __ bind(&done);
   1966   }
   1967 }
   1968 
   1969 
   1970 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   1971                                            LOperand* index,
   1972                                            String::Encoding encoding) {
   1973   if (index->IsConstantOperand()) {
   1974     int offset = ToInteger32(LConstantOperand::cast(index));
   1975     if (encoding == String::TWO_BYTE_ENCODING) {
   1976       offset *= kUC16Size;
   1977     }
   1978     STATIC_ASSERT(kCharSize == 1);
   1979     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   1980   }
   1981   Register scratch = scratch0();
   1982   DCHECK(!scratch.is(string));
   1983   DCHECK(!scratch.is(ToRegister(index)));
   1984   if (encoding == String::ONE_BYTE_ENCODING) {
   1985     __ add(scratch, string, Operand(ToRegister(index)));
   1986   } else {
   1987     STATIC_ASSERT(kUC16Size == 2);
   1988     __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
   1989   }
   1990   return FieldMemOperand(scratch, SeqString::kHeaderSize);
   1991 }
   1992 
   1993 
   1994 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1995   String::Encoding encoding = instr->hydrogen()->encoding();
   1996   Register string = ToRegister(instr->string());
   1997   Register result = ToRegister(instr->result());
   1998 
   1999   if (FLAG_debug_code) {
   2000     Register scratch = scratch0();
   2001     __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   2002     __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   2003 
   2004     __ and_(scratch, scratch,
   2005             Operand(kStringRepresentationMask | kStringEncodingMask));
   2006     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   2007     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   2008     __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
   2009                             ? one_byte_seq_type : two_byte_seq_type));
   2010     __ Check(eq, kUnexpectedStringType);
   2011   }
   2012 
   2013   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   2014   if (encoding == String::ONE_BYTE_ENCODING) {
   2015     __ ldrb(result, operand);
   2016   } else {
   2017     __ ldrh(result, operand);
   2018   }
   2019 }
   2020 
   2021 
   2022 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   2023   String::Encoding encoding = instr->hydrogen()->encoding();
   2024   Register string = ToRegister(instr->string());
   2025   Register value = ToRegister(instr->value());
   2026 
   2027   if (FLAG_debug_code) {
   2028     Register index = ToRegister(instr->index());
   2029     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   2030     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   2031     int encoding_mask =
   2032         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   2033         ? one_byte_seq_type : two_byte_seq_type;
   2034     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
   2035   }
   2036 
   2037   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   2038   if (encoding == String::ONE_BYTE_ENCODING) {
   2039     __ strb(value, operand);
   2040   } else {
   2041     __ strh(value, operand);
   2042   }
   2043 }
   2044 
   2045 
   2046 void LCodeGen::DoAddI(LAddI* instr) {
   2047   LOperand* left = instr->left();
   2048   LOperand* right = instr->right();
   2049   LOperand* result = instr->result();
   2050   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   2051   SBit set_cond = can_overflow ? SetCC : LeaveCC;
   2052 
   2053   if (right->IsStackSlot()) {
   2054     Register right_reg = EmitLoadRegister(right, ip);
   2055     __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   2056   } else {
   2057     DCHECK(right->IsRegister() || right->IsConstantOperand());
   2058     __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   2059   }
   2060 
   2061   if (can_overflow) {
   2062     DeoptimizeIf(vs, instr);
   2063   }
   2064 }
   2065 
   2066 
   2067 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   2068   LOperand* left = instr->left();
   2069   LOperand* right = instr->right();
   2070   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   2071   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   2072     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
   2073     Register left_reg = ToRegister(left);
   2074     Operand right_op = (right->IsRegister() || right->IsConstantOperand())
   2075         ? ToOperand(right)
   2076         : Operand(EmitLoadRegister(right, ip));
   2077     Register result_reg = ToRegister(instr->result());
   2078     __ cmp(left_reg, right_op);
   2079     __ Move(result_reg, left_reg, condition);
   2080     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
   2081   } else {
   2082     DCHECK(instr->hydrogen()->representation().IsDouble());
   2083     DwVfpRegister left_reg = ToDoubleRegister(left);
   2084     DwVfpRegister right_reg = ToDoubleRegister(right);
   2085     DwVfpRegister result_reg = ToDoubleRegister(instr->result());
   2086     Label result_is_nan, return_left, return_right, check_zero, done;
   2087     __ VFPCompareAndSetFlags(left_reg, right_reg);
   2088     if (operation == HMathMinMax::kMathMin) {
   2089       __ b(mi, &return_left);
   2090       __ b(gt, &return_right);
   2091     } else {
   2092       __ b(mi, &return_right);
   2093       __ b(gt, &return_left);
   2094     }
   2095     __ b(vs, &result_is_nan);
   2096     // Left equals right => check for -0.
   2097     __ VFPCompareAndSetFlags(left_reg, 0.0);
   2098     if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
   2099       __ b(ne, &done);  // left == right != 0.
   2100     } else {
   2101       __ b(ne, &return_left);  // left == right != 0.
   2102     }
   2103     // At this point, both left and right are either 0 or -0.
   2104     if (operation == HMathMinMax::kMathMin) {
   2105       // We could use a single 'vorr' instruction here if we had NEON support.
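               // For +/-0 inputs the minimum is -0 iff either operand is -0 (a
               // sign OR); -((-left) - right) produces exactly that.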
   2106       __ vneg(left_reg, left_reg);
   2107       __ vsub(result_reg, left_reg, right_reg);
   2108       __ vneg(result_reg, result_reg);
   2109     } else {
   2110       // Since we operate on +0 and/or -0, vadd and vand have the same effect;
   2111       // the decision for vadd is easy because vand is a NEON instruction.
   2112       __ vadd(result_reg, left_reg, right_reg);
   2113     }
   2114     __ b(&done);
   2115 
   2116     __ bind(&result_is_nan);
   2117     __ vadd(result_reg, left_reg, right_reg);
   2118     __ b(&done);
   2119 
   2120     __ bind(&return_right);
   2121     __ Move(result_reg, right_reg);
   2122     if (!left_reg.is(result_reg)) {
   2123       __ b(&done);
   2124     }
   2125 
   2126     __ bind(&return_left);
   2127     __ Move(result_reg, left_reg);
   2128 
   2129     __ bind(&done);
   2130   }
   2131 }
   2132 
   2133 
   2134 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   2135   DwVfpRegister left = ToDoubleRegister(instr->left());
   2136   DwVfpRegister right = ToDoubleRegister(instr->right());
   2137   DwVfpRegister result = ToDoubleRegister(instr->result());
   2138   switch (instr->op()) {
   2139     case Token::ADD:
   2140       __ vadd(result, left, right);
   2141       break;
   2142     case Token::SUB:
   2143       __ vsub(result, left, right);
   2144       break;
   2145     case Token::MUL:
   2146       __ vmul(result, left, right);
   2147       break;
   2148     case Token::DIV:
   2149       __ vdiv(result, left, right);
   2150       break;
   2151     case Token::MOD: {
   2152       __ PrepareCallCFunction(0, 2, scratch0());
   2153       __ MovToFloatParameters(left, right);
   2154       __ CallCFunction(
   2155           ExternalReference::mod_two_doubles_operation(isolate()),
   2156           0, 2);
    2157       // Move the result into the double result register.
   2158       __ MovFromFloatResult(result);
   2159       break;
   2160     }
   2161     default:
   2162       UNREACHABLE();
   2163       break;
   2164   }
   2165 }
   2166 
   2167 
   2168 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   2169   DCHECK(ToRegister(instr->context()).is(cp));
   2170   DCHECK(ToRegister(instr->left()).is(r1));
   2171   DCHECK(ToRegister(instr->right()).is(r0));
   2172   DCHECK(ToRegister(instr->result()).is(r0));
   2173 
   2174   Handle<Code> code =
   2175       CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
    2176   // Block literal pool emission to ensure the nop indicating no inlined smi
    2177   // code is in the correct position.
   2178   Assembler::BlockConstPoolScope block_const_pool(masm());
   2179   CallCode(code, RelocInfo::CODE_TARGET, instr);
   2180 }
   2181 
   2182 
   2183 template<class InstrType>
   2184 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
   2185   int left_block = instr->TrueDestination(chunk_);
   2186   int right_block = instr->FalseDestination(chunk_);
   2187 
   2188   int next_block = GetNextEmittedBlock();
   2189 
   2190   if (right_block == left_block || condition == al) {
   2191     EmitGoto(left_block);
   2192   } else if (left_block == next_block) {
   2193     __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
   2194   } else if (right_block == next_block) {
   2195     __ b(condition, chunk_->GetAssemblyLabel(left_block));
   2196   } else {
   2197     __ b(condition, chunk_->GetAssemblyLabel(left_block));
   2198     __ b(chunk_->GetAssemblyLabel(right_block));
   2199   }
   2200 }
   2201 
   2202 
   2203 template<class InstrType>
   2204 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
   2205   int false_block = instr->FalseDestination(chunk_);
   2206   __ b(condition, chunk_->GetAssemblyLabel(false_block));
   2207 }
   2208 
   2209 
   2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   2211   __ stop("LBreak");
   2212 }
   2213 
   2214 
   2215 void LCodeGen::DoBranch(LBranch* instr) {
   2216   Representation r = instr->hydrogen()->value()->representation();
   2217   if (r.IsInteger32() || r.IsSmi()) {
   2218     DCHECK(!info()->IsStub());
   2219     Register reg = ToRegister(instr->value());
   2220     __ cmp(reg, Operand::Zero());
   2221     EmitBranch(instr, ne);
   2222   } else if (r.IsDouble()) {
   2223     DCHECK(!info()->IsStub());
   2224     DwVfpRegister reg = ToDoubleRegister(instr->value());
   2225     // Test the double value. Zero and NaN are false.
   2226     __ VFPCompareAndSetFlags(reg, 0.0);
   2227     __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
   2228     EmitBranch(instr, ne);
   2229   } else {
   2230     DCHECK(r.IsTagged());
   2231     Register reg = ToRegister(instr->value());
   2232     HType type = instr->hydrogen()->value()->type();
   2233     if (type.IsBoolean()) {
   2234       DCHECK(!info()->IsStub());
   2235       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2236       EmitBranch(instr, eq);
   2237     } else if (type.IsSmi()) {
   2238       DCHECK(!info()->IsStub());
   2239       __ cmp(reg, Operand::Zero());
   2240       EmitBranch(instr, ne);
   2241     } else if (type.IsJSArray()) {
   2242       DCHECK(!info()->IsStub());
   2243       EmitBranch(instr, al);
   2244     } else if (type.IsHeapNumber()) {
   2245       DCHECK(!info()->IsStub());
   2246       DwVfpRegister dbl_scratch = double_scratch0();
   2247       __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2248       // Test the double value. Zero and NaN are false.
   2249       __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
    2250       __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
   2251       EmitBranch(instr, ne);
   2252     } else if (type.IsString()) {
   2253       DCHECK(!info()->IsStub());
   2254       __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
   2255       __ cmp(ip, Operand::Zero());
   2256       EmitBranch(instr, ne);
   2257     } else {
   2258       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
   2259       // Avoid deopts in the case where we've never executed this path before.
   2260       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
   2261 
   2262       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
   2263         // undefined -> false.
   2264         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
   2265         __ b(eq, instr->FalseLabel(chunk_));
   2266       }
   2267       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
   2268         // Boolean -> its value.
   2269         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2270         __ b(eq, instr->TrueLabel(chunk_));
   2271         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
   2272         __ b(eq, instr->FalseLabel(chunk_));
   2273       }
   2274       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
   2275         // 'null' -> false.
   2276         __ CompareRoot(reg, Heap::kNullValueRootIndex);
   2277         __ b(eq, instr->FalseLabel(chunk_));
   2278       }
   2279 
   2280       if (expected.Contains(ToBooleanStub::SMI)) {
   2281         // Smis: 0 -> false, all other -> true.
   2282         __ cmp(reg, Operand::Zero());
   2283         __ b(eq, instr->FalseLabel(chunk_));
   2284         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2285       } else if (expected.NeedsMap()) {
   2286         // If we need a map later and have a Smi -> deopt.
   2287         __ SmiTst(reg);
   2288         DeoptimizeIf(eq, instr);
   2289       }
   2290 
   2291       const Register map = scratch0();
   2292       if (expected.NeedsMap()) {
   2293         __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
   2294 
   2295         if (expected.CanBeUndetectable()) {
   2296           // Undetectable -> false.
   2297           __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
   2298           __ tst(ip, Operand(1 << Map::kIsUndetectable));
   2299           __ b(ne, instr->FalseLabel(chunk_));
   2300         }
   2301       }
   2302 
   2303       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
   2304         // spec object -> true.
   2305         __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
   2306         __ b(ge, instr->TrueLabel(chunk_));
   2307       }
   2308 
   2309       if (expected.Contains(ToBooleanStub::STRING)) {
   2310         // String value -> false iff empty.
   2311         Label not_string;
   2312         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
   2313         __ b(ge, &not_string);
   2314         __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
   2315         __ cmp(ip, Operand::Zero());
   2316         __ b(ne, instr->TrueLabel(chunk_));
   2317         __ b(instr->FalseLabel(chunk_));
   2318         __ bind(&not_string);
   2319       }
   2320 
   2321       if (expected.Contains(ToBooleanStub::SYMBOL)) {
   2322         // Symbol value -> true.
   2323         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
   2324         __ b(eq, instr->TrueLabel(chunk_));
   2325       }
   2326 
   2327       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
   2328         // heap number -> false iff +0, -0, or NaN.
   2329         DwVfpRegister dbl_scratch = double_scratch0();
   2330         Label not_heap_number;
   2331         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
   2332         __ b(ne, &not_heap_number);
   2333         __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2334         __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
   2335         __ cmp(r0, r0, vs);  // NaN -> false.
   2336         __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
   2337         __ b(instr->TrueLabel(chunk_));
   2338         __ bind(&not_heap_number);
   2339       }
   2340 
   2341       if (!expected.IsGeneric()) {
   2342         // We've seen something for the first time -> deopt.
   2343         // This can only happen if we are not generic already.
   2344         DeoptimizeIf(al, instr);
   2345       }
   2346     }
   2347   }
   2348 }
   2349 
   2350 
   2351 void LCodeGen::EmitGoto(int block) {
   2352   if (!IsNextEmittedBlock(block)) {
   2353     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2354   }
   2355 }
   2356 
   2357 
   2358 void LCodeGen::DoGoto(LGoto* instr) {
   2359   EmitGoto(instr->block_id());
   2360 }
   2361 
   2362 
   2363 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2364   Condition cond = kNoCondition;
   2365   switch (op) {
   2366     case Token::EQ:
   2367     case Token::EQ_STRICT:
   2368       cond = eq;
   2369       break;
   2370     case Token::NE:
   2371     case Token::NE_STRICT:
   2372       cond = ne;
   2373       break;
   2374     case Token::LT:
   2375       cond = is_unsigned ? lo : lt;
   2376       break;
   2377     case Token::GT:
   2378       cond = is_unsigned ? hi : gt;
   2379       break;
   2380     case Token::LTE:
   2381       cond = is_unsigned ? ls : le;
   2382       break;
   2383     case Token::GTE:
   2384       cond = is_unsigned ? hs : ge;
   2385       break;
   2386     case Token::IN:
   2387     case Token::INSTANCEOF:
   2388     default:
   2389       UNREACHABLE();
   2390   }
   2391   return cond;
   2392 }
   2393 
   2394 
   2395 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2396   LOperand* left = instr->left();
   2397   LOperand* right = instr->right();
   2398   bool is_unsigned =
   2399       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2400       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2401   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2402 
   2403   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2404     // We can statically evaluate the comparison.
   2405     double left_val = ToDouble(LConstantOperand::cast(left));
   2406     double right_val = ToDouble(LConstantOperand::cast(right));
   2407     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
   2408         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
   2409     EmitGoto(next_block);
   2410   } else {
   2411     if (instr->is_double()) {
   2412       // Compare left and right operands as doubles and load the
   2413       // resulting flags into the normal status register.
   2414       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
   2415       // If a NaN is involved, i.e. the result is unordered (V set),
   2416       // jump to false block label.
   2417       __ b(vs, instr->FalseLabel(chunk_));
   2418     } else {
   2419       if (right->IsConstantOperand()) {
   2420         int32_t value = ToInteger32(LConstantOperand::cast(right));
   2421         if (instr->hydrogen_value()->representation().IsSmi()) {
   2422           __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
   2423         } else {
   2424           __ cmp(ToRegister(left), Operand(value));
   2425         }
   2426       } else if (left->IsConstantOperand()) {
   2427         int32_t value = ToInteger32(LConstantOperand::cast(left));
   2428         if (instr->hydrogen_value()->representation().IsSmi()) {
   2429           __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
   2430         } else {
   2431           __ cmp(ToRegister(right), Operand(value));
   2432         }
   2433         // We commuted the operands, so commute the condition.
   2434         cond = CommuteCondition(cond);
   2435       } else {
   2436         __ cmp(ToRegister(left), ToRegister(right));
   2437       }
   2438     }
   2439     EmitBranch(instr, cond);
   2440   }
   2441 }
   2442 
   2443 
   2444 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2445   Register left = ToRegister(instr->left());
   2446   Register right = ToRegister(instr->right());
   2447 
   2448   __ cmp(left, Operand(right));
   2449   EmitBranch(instr, eq);
   2450 }
   2451 
   2452 
   2453 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2454   if (instr->hydrogen()->representation().IsTagged()) {
   2455     Register input_reg = ToRegister(instr->object());
   2456     __ mov(ip, Operand(factory()->the_hole_value()));
   2457     __ cmp(input_reg, ip);
   2458     EmitBranch(instr, eq);
   2459     return;
   2460   }
   2461 
   2462   DwVfpRegister input_reg = ToDoubleRegister(instr->object());
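           // The hole is a particular NaN bit pattern: first check that the value
           // is a NaN at all, then compare its upper word with the hole's.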
   2463   __ VFPCompareAndSetFlags(input_reg, input_reg);
   2464   EmitFalseBranch(instr, vc);
   2465 
   2466   Register scratch = scratch0();
   2467   __ VmovHigh(scratch, input_reg);
   2468   __ cmp(scratch, Operand(kHoleNanUpper32));
   2469   EmitBranch(instr, eq);
   2470 }
   2471 
   2472 
   2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   2474   Representation rep = instr->hydrogen()->value()->representation();
   2475   DCHECK(!rep.IsInteger32());
   2476   Register scratch = ToRegister(instr->temp());
   2477 
   2478   if (rep.IsDouble()) {
   2479     DwVfpRegister value = ToDoubleRegister(instr->value());
   2480     __ VFPCompareAndSetFlags(value, 0.0);
   2481     EmitFalseBranch(instr, ne);
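             // +0 and -0 compare equal, so distinguish them by the sign bit in
             // the upper word.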
   2482     __ VmovHigh(scratch, value);
   2483     __ cmp(scratch, Operand(0x80000000));
   2484   } else {
   2485     Register value = ToRegister(instr->value());
   2486     __ CheckMap(value,
   2487                 scratch,
   2488                 Heap::kHeapNumberMapRootIndex,
   2489                 instr->FalseLabel(chunk()),
   2490                 DO_SMI_CHECK);
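             // A heap-number -0 has exponent word 0x80000000 and mantissa word 0;
             // the second cmp is predicated on eq, so both words must match.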
   2491     __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
   2492     __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
   2493     __ cmp(scratch, Operand(0x80000000));
   2494     __ cmp(ip, Operand(0x00000000), eq);
   2495   }
   2496   EmitBranch(instr, eq);
   2497 }
   2498 
   2499 
   2500 Condition LCodeGen::EmitIsObject(Register input,
   2501                                  Register temp1,
   2502                                  Label* is_not_object,
   2503                                  Label* is_object) {
   2504   Register temp2 = scratch0();
   2505   __ JumpIfSmi(input, is_not_object);
   2506 
   2507   __ LoadRoot(temp2, Heap::kNullValueRootIndex);
   2508   __ cmp(input, temp2);
   2509   __ b(eq, is_object);
   2510 
   2511   // Load map.
   2512   __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
   2513   // Undetectable objects behave like undefined.
   2514   __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
   2515   __ tst(temp2, Operand(1 << Map::kIsUndetectable));
   2516   __ b(ne, is_not_object);
   2517 
   2518   // Load instance type and check that it is in object type range.
   2519   __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
   2520   __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2521   __ b(lt, is_not_object);
   2522   __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2523   return le;
   2524 }
   2525 
   2526 
   2527 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   2528   Register reg = ToRegister(instr->value());
   2529   Register temp1 = ToRegister(instr->temp());
   2530 
   2531   Condition true_cond =
   2532       EmitIsObject(reg, temp1,
   2533           instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
   2534 
   2535   EmitBranch(instr, true_cond);
   2536 }
   2537 
   2538 
   2539 Condition LCodeGen::EmitIsString(Register input,
   2540                                  Register temp1,
   2541                                  Label* is_not_string,
   2542                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2543   if (check_needed == INLINE_SMI_CHECK) {
   2544     __ JumpIfSmi(input, is_not_string);
   2545   }
   2546   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
   2547 
   2548   return lt;
   2549 }
   2550 
   2551 
   2552 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2553   Register reg = ToRegister(instr->value());
   2554   Register temp1 = ToRegister(instr->temp());
   2555 
   2556   SmiCheck check_needed =
   2557       instr->hydrogen()->value()->type().IsHeapObject()
   2558           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2559   Condition true_cond =
   2560       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
   2561 
   2562   EmitBranch(instr, true_cond);
   2563 }
   2564 
   2565 
   2566 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2567   Register input_reg = EmitLoadRegister(instr->value(), ip);
   2568   __ SmiTst(input_reg);
   2569   EmitBranch(instr, eq);
   2570 }
   2571 
   2572 
   2573 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2574   Register input = ToRegister(instr->value());
   2575   Register temp = ToRegister(instr->temp());
   2576 
   2577   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2578     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2579   }
   2580   __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2581   __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2582   __ tst(temp, Operand(1 << Map::kIsUndetectable));
   2583   EmitBranch(instr, ne);
   2584 }
   2585 
   2586 
   2587 static Condition ComputeCompareCondition(Token::Value op) {
   2588   switch (op) {
   2589     case Token::EQ_STRICT:
   2590     case Token::EQ:
   2591       return eq;
   2592     case Token::LT:
   2593       return lt;
   2594     case Token::GT:
   2595       return gt;
   2596     case Token::LTE:
   2597       return le;
   2598     case Token::GTE:
   2599       return ge;
   2600     default:
   2601       UNREACHABLE();
   2602       return kNoCondition;
   2603   }
   2604 }
   2605 
   2606 
   2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2608   DCHECK(ToRegister(instr->context()).is(cp));
   2609   Token::Value op = instr->op();
   2610 
   2611   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   2612   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2613   // This instruction also signals no smi code inlined.
   2614   __ cmp(r0, Operand::Zero());
   2615 
   2616   Condition condition = ComputeCompareCondition(op);
   2617 
   2618   EmitBranch(instr, condition);
   2619 }
   2620 
   2621 
   2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2623   InstanceType from = instr->from();
   2624   InstanceType to = instr->to();
   2625   if (from == FIRST_TYPE) return to;
   2626   DCHECK(from == to || to == LAST_TYPE);
   2627   return from;
   2628 }
   2629 
   2630 
   2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2632   InstanceType from = instr->from();
   2633   InstanceType to = instr->to();
   2634   if (from == to) return eq;
   2635   if (to == LAST_TYPE) return hs;
   2636   if (from == FIRST_TYPE) return ls;
   2637   UNREACHABLE();
   2638   return eq;
   2639 }
   2640 
   2641 
   2642 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2643   Register scratch = scratch0();
   2644   Register input = ToRegister(instr->value());
   2645 
   2646   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2647     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2648   }
   2649 
   2650   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
   2651   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2652 }
   2653 
   2654 
   2655 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
   2656   Register input = ToRegister(instr->value());
   2657   Register result = ToRegister(instr->result());
   2658 
   2659   __ AssertString(input);
   2660 
   2661   __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
   2662   __ IndexFromHash(result, result);
   2663 }
   2664 
   2665 
   2666 void LCodeGen::DoHasCachedArrayIndexAndBranch(
   2667     LHasCachedArrayIndexAndBranch* instr) {
   2668   Register input = ToRegister(instr->value());
   2669   Register scratch = scratch0();
   2670 
   2671   __ ldr(scratch,
   2672          FieldMemOperand(input, String::kHashFieldOffset));
   2673   __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
   2674   EmitBranch(instr, eq);
   2675 }
   2676 
   2677 
   2678 // Branches to a label or falls through with the answer in flags.  Trashes
   2679 // the temp registers, but not the input.
   2680 void LCodeGen::EmitClassOfTest(Label* is_true,
   2681                                Label* is_false,
    2682                                Handle<String> class_name,
   2683                                Register input,
   2684                                Register temp,
   2685                                Register temp2) {
   2686   DCHECK(!input.is(temp));
   2687   DCHECK(!input.is(temp2));
   2688   DCHECK(!temp.is(temp2));
   2689 
   2690   __ JumpIfSmi(input, is_false);
   2691 
   2692   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2693     // Assuming the following assertions, we can use the same compares to test
   2694     // for both being a function type and being in the object type range.
   2695     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   2696     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2697                   FIRST_SPEC_OBJECT_TYPE + 1);
   2698     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2699                   LAST_SPEC_OBJECT_TYPE - 1);
   2700     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
   2701     __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
   2702     __ b(lt, is_false);
   2703     __ b(eq, is_true);
   2704     __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
   2705     __ b(eq, is_true);
   2706   } else {
   2707     // Faster code path to avoid two compares: subtract lower bound from the
   2708     // actual type and do a signed compare with the width of the type range.
   2709     __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2710     __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
   2711     __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2712     __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
   2713                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2714     __ b(gt, is_false);
   2715   }
   2716 
   2717   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   2718   // Check if the constructor in the map is a function.
   2719   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
   2720 
   2721   // Objects with a non-function constructor have class 'Object'.
   2722   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
   2723   if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
   2724     __ b(ne, is_true);
   2725   } else {
   2726     __ b(ne, is_false);
   2727   }
   2728 
   2729   // temp now contains the constructor function. Grab the
   2730   // instance class name from there.
   2731   __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2732   __ ldr(temp, FieldMemOperand(temp,
   2733                                SharedFunctionInfo::kInstanceClassNameOffset));
   2734   // The class name we are testing against is internalized since it's a literal.
   2735   // The name in the constructor is internalized because of the way the context
   2736   // is booted.  This routine isn't expected to work for random API-created
   2737   // classes and it doesn't have to because you can't access it with natives
   2738   // syntax.  Since both sides are internalized it is sufficient to use an
   2739   // identity comparison.
   2740   __ cmp(temp, Operand(class_name));
   2741   // End with the answer in flags.
   2742 }
   2743 
   2744 
   2745 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2746   Register input = ToRegister(instr->value());
   2747   Register temp = scratch0();
   2748   Register temp2 = ToRegister(instr->temp());
   2749   Handle<String> class_name = instr->hydrogen()->class_name();
   2750 
   2751   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2752       class_name, input, temp, temp2);
   2753 
   2754   EmitBranch(instr, eq);
   2755 }
   2756 
   2757 
   2758 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2759   Register reg = ToRegister(instr->value());
   2760   Register temp = ToRegister(instr->temp());
   2761 
   2762   __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   2763   __ cmp(temp, Operand(instr->map()));
   2764   EmitBranch(instr, eq);
   2765 }
   2766 
   2767 
   2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   2769   DCHECK(ToRegister(instr->context()).is(cp));
   2770   DCHECK(ToRegister(instr->left()).is(r0));  // Object is in r0.
   2771   DCHECK(ToRegister(instr->right()).is(r1));  // Function is in r1.
   2772 
   2773   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   2774   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2775 
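          // The stub returns zero when the object is an instance of the function and
          // a non-zero value otherwise; translate that into the true/false objects.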
   2776   __ cmp(r0, Operand::Zero());
   2777   __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
   2778   __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
   2779 }
   2780 
   2781 
   2782 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   2783   class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
   2784    public:
   2785     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
   2786                                   LInstanceOfKnownGlobal* instr)
   2787         : LDeferredCode(codegen), instr_(instr) { }
   2788     virtual void Generate() OVERRIDE {
   2789       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
   2790                                                  &load_bool_);
   2791     }
   2792     virtual LInstruction* instr() OVERRIDE { return instr_; }
   2793     Label* map_check() { return &map_check_; }
   2794     Label* load_bool() { return &load_bool_; }
   2795 
   2796    private:
   2797     LInstanceOfKnownGlobal* instr_;
   2798     Label map_check_;
   2799     Label load_bool_;
   2800   };
   2801 
   2802   DeferredInstanceOfKnownGlobal* deferred;
   2803   deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
   2804 
   2805   Label done, false_result;
   2806   Register object = ToRegister(instr->value());
   2807   Register temp = ToRegister(instr->temp());
   2808   Register result = ToRegister(instr->result());
   2809 
   2810   // A Smi is not instance of anything.
   2811   __ JumpIfSmi(object, &false_result);
   2812 
   2813   // This is the inlined call site instanceof cache. The two occurrences of the
   2814   // hole value will be patched to the last map/result pair generated by the
   2815   // instanceof stub.
   2816   Label cache_miss;
   2817   Register map = temp;
   2818   __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   2819   {
   2820     // Block constant pool emission to ensure the positions of instructions are
   2821     // as expected by the patcher. See InstanceofStub::Generate().
   2822     Assembler::BlockConstPoolScope block_const_pool(masm());
   2823     __ bind(deferred->map_check());  // Label for calculating code patching.
   2824     // We use Factory::the_hole_value() on purpose instead of loading from the
   2825     // root array, to force relocation so that the instruction can later be
   2826     // patched with the cached map.
   2827     Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
   2828     __ mov(ip, Operand(Handle<Object>(cell)));
   2829     __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
   2830     __ cmp(map, Operand(ip));
   2831     __ b(ne, &cache_miss);
   2832     __ bind(deferred->load_bool());  // Label for calculating code patching.
   2833     // We use Factory::the_hole_value() on purpose instead of loading from the
   2834     // root array, to force relocation so that the instruction can later be
   2835     // patched with true or false.
   2836     __ mov(result, Operand(factory()->the_hole_value()));
   2837   }
   2838   __ b(&done);
   2839 
   2840   // The inlined call site cache did not match. Check null and string before
   2841   // calling the deferred code.
   2842   __ bind(&cache_miss);
   2843   // Null is not instance of anything.
   2844   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   2845   __ cmp(object, Operand(ip));
   2846   __ b(eq, &false_result);
   2847 
   2848   // String values are not instances of anything.
   2849   Condition is_string = masm_->IsObjectStringType(object, temp);
   2850   __ b(is_string, &false_result);
   2851 
   2852   // Go to the deferred code.
   2853   __ b(deferred->entry());
   2854 
   2855   __ bind(&false_result);
   2856   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   2857 
   2858   // At this point result holds either the true or the false object. The
   2859   // deferred code also produces a true or false object.
   2860   __ bind(deferred->exit());
   2861   __ bind(&done);
   2862 }
   2863 
   2864 
   2865 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   2866                                                Label* map_check,
   2867                                                Label* bool_load) {
   2868   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   2869   flags = static_cast<InstanceofStub::Flags>(
   2870       flags | InstanceofStub::kArgsInRegisters);
   2871   flags = static_cast<InstanceofStub::Flags>(
   2872       flags | InstanceofStub::kCallSiteInlineCheck);
   2873   flags = static_cast<InstanceofStub::Flags>(
   2874       flags | InstanceofStub::kReturnTrueFalseObject);
   2875   InstanceofStub stub(isolate(), flags);
   2876 
   2877   PushSafepointRegistersScope scope(this);
   2878   LoadContextFromDeferred(instr->context());
   2879 
   2880   __ Move(InstanceofStub::right(), instr->function());
   2881 
   2882   int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
   2883   int additional_delta = (call_size / Assembler::kInstrSize) + 4;
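          // The "+ 4" covers the two mov immediates into r5 and r6 emitted below:
          // each can expand to one or two instructions, and the nop padding loop
          // further down tops them up to four instructions in total.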
   2884   // Make sure that the code size is predictable, since we use specific
   2885   // constant offsets in the code to find embedded values.
   2886   PredictableCodeSizeScope predictable(
   2887       masm_, (additional_delta + 1) * Assembler::kInstrSize);
   2888   // Make sure we don't emit any additional entries in the constant pool before
   2889   // the call, to ensure that CallCodeSize() calculated the correct number of
   2890   // instructions for the constant pool load.
   2891   {
   2892     ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
   2893     int map_check_delta =
   2894         masm_->InstructionsGeneratedSince(map_check) + additional_delta;
   2895     int bool_load_delta =
   2896         masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
   2897     Label before_push_delta;
   2898     __ bind(&before_push_delta);
   2899     __ BlockConstPoolFor(additional_delta);
   2900     // r5 is used to communicate the offset to the location of the map check.
   2901     __ mov(r5, Operand(map_check_delta * kPointerSize));
   2902     // r6 is used to communicate the offset to the location of the bool load.
   2903     __ mov(r6, Operand(bool_load_delta * kPointerSize));
   2904     // Each mov above can generate one or two instructions. The deltas were
   2905     // computed assuming two instructions per mov, so pad with nops here if
   2906     // fewer were emitted.
   2907     while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
   2908       __ nop();
   2909     }
   2910   }
   2911   CallCodeGeneric(stub.GetCode(),
   2912                   RelocInfo::CODE_TARGET,
   2913                   instr,
   2914                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   2915   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   2916   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2917   // Put the result value (r0) into the result register slot and
   2918   // restore all registers.
   2919   __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
   2920 }
   2921 
   2922 
   2923 void LCodeGen::DoCmpT(LCmpT* instr) {
   2924   DCHECK(ToRegister(instr->context()).is(cp));
   2925   Token::Value op = instr->op();
   2926 
   2927   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   2928   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2929   // This instruction also signals that no smi code was inlined.
   2930   __ cmp(r0, Operand::Zero());
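          // The compare IC returns a value whose relation to zero mirrors the
          // relation between the two operands, so the flags set above feed the
          // conditional loads below directly.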
   2931 
   2932   Condition condition = ComputeCompareCondition(op);
   2933   __ LoadRoot(ToRegister(instr->result()),
   2934               Heap::kTrueValueRootIndex,
   2935               condition);
   2936   __ LoadRoot(ToRegister(instr->result()),
   2937               Heap::kFalseValueRootIndex,
   2938               NegateCondition(condition));
   2939 }
   2940 
   2941 
   2942 void LCodeGen::DoReturn(LReturn* instr) {
   2943   if (FLAG_trace && info()->IsOptimizing()) {
   2944     // Push the return value on the stack as the parameter.
   2945     // Runtime::TraceExit returns its parameter in r0.  We're leaving the code
   2946     // managed by the register allocator and tearing down the frame, so it's
   2947     // safe to write to the context register.
   2948     __ push(r0);
   2949     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2950     __ CallRuntime(Runtime::kTraceExit, 1);
   2951   }
   2952   if (info()->saves_caller_doubles()) {
   2953     RestoreCallerDoubles();
   2954   }
   2955   int no_frame_start = -1;
   2956   if (NeedsEagerFrame()) {
   2957     no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
   2958   }
   2959   { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
   2960     if (instr->has_constant_parameter_count()) {
   2961       int parameter_count = ToInteger32(instr->constant_parameter_count());
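              // Drop the parameters plus the receiver slot (hence the "+ 1" below).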
   2962       int32_t sp_delta = (parameter_count + 1) * kPointerSize;
   2963       if (sp_delta != 0) {
   2964         __ add(sp, sp, Operand(sp_delta));
   2965       }
   2966     } else {
   2967       Register reg = ToRegister(instr->parameter_count());
   2968       // The argument count parameter is a smi.
   2969       __ SmiUntag(reg);
   2970       __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
   2971     }
   2972 
   2973     __ Jump(lr);
   2974 
   2975     if (no_frame_start != -1) {
   2976       info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   2977     }
   2978   }
   2979 }
   2980 
   2981 
   2982 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   2983   Register result = ToRegister(instr->result());
   2984   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   2985   __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
   2986   if (instr->hydrogen()->RequiresHoleCheck()) {
   2987     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2988     __ cmp(result, ip);
   2989     DeoptimizeIf(eq, instr);
   2990   }
   2991 }
   2992 
   2993 
   2994 template <class T>
   2995 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   2996   DCHECK(FLAG_vector_ics);
   2997   Register vector = ToRegister(instr->temp_vector());
   2998   DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
   2999   __ Move(vector, instr->hydrogen()->feedback_vector());
   3000   // No need to allocate this register.
   3001   DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
   3002   __ mov(VectorLoadICDescriptor::SlotRegister(),
   3003          Operand(Smi::FromInt(instr->hydrogen()->slot())));
   3004 }
   3005 
   3006 
   3007 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   3008   DCHECK(ToRegister(instr->context()).is(cp));
   3009   DCHECK(ToRegister(instr->global_object())
   3010              .is(LoadDescriptor::ReceiverRegister()));
   3011   DCHECK(ToRegister(instr->result()).is(r0));
   3012 
   3013   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   3014   if (FLAG_vector_ics) {
   3015     EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
   3016   }
   3017   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
   3018   Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   3019   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3020 }
   3021 
   3022 
   3023 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   3024   Register value = ToRegister(instr->value());
   3025   Register cell = scratch0();
   3026 
   3027   // Load the cell.
   3028   __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
   3029 
   3030   // If the cell we are storing to contains the hole it could have
   3031   // been deleted from the property dictionary. In that case, we need
   3032   // to update the property details in the property dictionary to mark
   3033   // it as no longer deleted.
   3034   if (instr->hydrogen()->RequiresHoleCheck()) {
   3035     // We use a temp to check the payload (CompareRoot might clobber ip).
   3036     Register payload = ToRegister(instr->temp());
   3037     __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
   3038     __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
   3039     DeoptimizeIf(eq, instr);
   3040   }
   3041 
   3042   // Store the value.
   3043   __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
   3044   // Cells are always rescanned, so no write barrier here.
   3045 }
   3046 
   3047 
   3048 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   3049   Register context = ToRegister(instr->context());
   3050   Register result = ToRegister(instr->result());
   3051   __ ldr(result, ContextOperand(context, instr->slot_index()));
   3052   if (instr->hydrogen()->RequiresHoleCheck()) {
   3053     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   3054     __ cmp(result, ip);
   3055     if (instr->hydrogen()->DeoptimizesOnHole()) {
   3056       DeoptimizeIf(eq, instr);
   3057     } else {
   3058       __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
   3059     }
   3060   }
   3061 }
   3062 
   3063 
   3064 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   3065   Register context = ToRegister(instr->context());
   3066   Register value = ToRegister(instr->value());
   3067   Register scratch = scratch0();
   3068   MemOperand target = ContextOperand(context, instr->slot_index());
   3069 
   3070   Label skip_assignment;
   3071 
   3072   if (instr->hydrogen()->RequiresHoleCheck()) {
   3073     __ ldr(scratch, target);
   3074     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   3075     __ cmp(scratch, ip);
   3076     if (instr->hydrogen()->DeoptimizesOnHole()) {
   3077       DeoptimizeIf(eq, instr);
   3078     } else {
   3079       __ b(ne, &skip_assignment);
   3080     }
   3081   }
   3082 
   3083   __ str(value, target);
   3084   if (instr->hydrogen()->NeedsWriteBarrier()) {
   3085     SmiCheck check_needed =
   3086         instr->hydrogen()->value()->type().IsHeapObject()
   3087             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   3088     __ RecordWriteContextSlot(context,
   3089                               target.offset(),
   3090                               value,
   3091                               scratch,
   3092                               GetLinkRegisterState(),
   3093                               kSaveFPRegs,
   3094                               EMIT_REMEMBERED_SET,
   3095                               check_needed);
   3096   }
   3097 
   3098   __ bind(&skip_assignment);
   3099 }
   3100 
   3101 
   3102 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   3103   HObjectAccess access = instr->hydrogen()->access();
   3104   int offset = access.offset();
   3105   Register object = ToRegister(instr->object());
   3106 
   3107   if (access.IsExternalMemory()) {
   3108     Register result = ToRegister(instr->result());
   3109     MemOperand operand = MemOperand(object, offset);
   3110     __ Load(result, operand, access.representation());
   3111     return;
   3112   }
   3113 
   3114   if (instr->hydrogen()->representation().IsDouble()) {
   3115     DwVfpRegister result = ToDoubleRegister(instr->result());
   3116     __ vldr(result, FieldMemOperand(object, offset));
   3117     return;
   3118   }
   3119 
   3120   Register result = ToRegister(instr->result());
   3121   if (!access.IsInobject()) {
   3122     __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3123     object = result;
   3124   }
   3125   MemOperand operand = FieldMemOperand(object, offset);
   3126   __ Load(result, operand, access.representation());
   3127 }
   3128 
   3129 
   3130 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   3131   DCHECK(ToRegister(instr->context()).is(cp));
   3132   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   3133   DCHECK(ToRegister(instr->result()).is(r0));
   3134 
   3135   // Name is always in r2.
   3136   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   3137   if (FLAG_vector_ics) {
   3138     EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
   3139   }
   3140   Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   3141   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   3142 }
   3143 
   3144 
   3145 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   3146   Register scratch = scratch0();
   3147   Register function = ToRegister(instr->function());
   3148   Register result = ToRegister(instr->result());
   3149 
   3150   // Get the prototype or initial map from the function.
   3151   __ ldr(result,
   3152          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   3153 
   3154   // Check that the function has a prototype or an initial map.
   3155   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   3156   __ cmp(result, ip);
   3157   DeoptimizeIf(eq, instr);
   3158 
   3159   // If the function does not have an initial map, we're done.
   3160   Label done;
   3161   __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
   3162   __ b(ne, &done);
   3163 
   3164   // Get the prototype from the initial map.
   3165   __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   3166 
   3167   // All done.
   3168   __ bind(&done);
   3169 }
   3170 
   3171 
   3172 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   3173   Register result = ToRegister(instr->result());
   3174   __ LoadRoot(result, instr->index());
   3175 }
   3176 
   3177 
   3178 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   3179   Register arguments = ToRegister(instr->arguments());
   3180   Register result = ToRegister(instr->result());
   3181   // There are two words between the frame pointer and the last argument.
   3182   // Subtracting from length accounts for one of them add one more.
   3183   if (instr->length()->IsConstantOperand()) {
   3184     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   3185     if (instr->index()->IsConstantOperand()) {
   3186       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3187       int index = (const_length - const_index) + 1;
   3188       __ ldr(result, MemOperand(arguments, index * kPointerSize));
   3189     } else {
   3190       Register index = ToRegister(instr->index());
   3191       __ rsb(result, index, Operand(const_length + 1));
   3192       __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
   3193     }
   3194   } else if (instr->index()->IsConstantOperand()) {
   3195     Register length = ToRegister(instr->length());
   3196     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3197     int loc = const_index - 1;
   3198     if (loc != 0) {
   3199       __ sub(result, length, Operand(loc));
   3200       __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
   3201     } else {
   3202       __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
   3203     }
   3204   } else {
   3205     Register length = ToRegister(instr->length());
   3206     Register index = ToRegister(instr->index());
   3207     __ sub(result, length, index);
   3208     __ add(result, result, Operand(1));
   3209     __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
   3210   }
   3211 }
   3212 
   3213 
   3214 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   3215   Register external_pointer = ToRegister(instr->elements());
   3216   Register key = no_reg;
   3217   ElementsKind elements_kind = instr->elements_kind();
   3218   bool key_is_constant = instr->key()->IsConstantOperand();
   3219   int constant_key = 0;
   3220   if (key_is_constant) {
   3221     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3222     if (constant_key & 0xF0000000) {
   3223       Abort(kArrayIndexConstantValueTooBig);
   3224     }
   3225   } else {
   3226     key = ToRegister(instr->key());
   3227   }
   3228   int element_size_shift = ElementsKindToShiftSize(elements_kind);
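          // A smi key already carries an implicit left shift of kSmiTagSize, so fold
          // that tag shift into the scaling shift computed here.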
   3229   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3230       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3231   int base_offset = instr->base_offset();
   3232 
   3233   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   3234       elements_kind == FLOAT32_ELEMENTS ||
   3235       elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
   3236       elements_kind == FLOAT64_ELEMENTS) {
   3238     DwVfpRegister result = ToDoubleRegister(instr->result());
   3239     Operand operand = key_is_constant
   3240         ? Operand(constant_key << element_size_shift)
   3241         : Operand(key, LSL, shift_size);
   3242     __ add(scratch0(), external_pointer, operand);
   3243     if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   3244         elements_kind == FLOAT32_ELEMENTS) {
   3245       __ vldr(double_scratch0().low(), scratch0(), base_offset);
   3246       __ vcvt_f64_f32(result, double_scratch0().low());
   3247     } else {  // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS
   3248       __ vldr(result, scratch0(), base_offset);
   3249     }
   3250   } else {
   3251     Register result = ToRegister(instr->result());
   3252     MemOperand mem_operand = PrepareKeyedOperand(
   3253         key, external_pointer, key_is_constant, constant_key,
   3254         element_size_shift, shift_size, base_offset);
   3255     switch (elements_kind) {
   3256       case EXTERNAL_INT8_ELEMENTS:
   3257       case INT8_ELEMENTS:
   3258         __ ldrsb(result, mem_operand);
   3259         break;
   3260       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
   3261       case EXTERNAL_UINT8_ELEMENTS:
   3262       case UINT8_ELEMENTS:
   3263       case UINT8_CLAMPED_ELEMENTS:
   3264         __ ldrb(result, mem_operand);
   3265         break;
   3266       case EXTERNAL_INT16_ELEMENTS:
   3267       case INT16_ELEMENTS:
   3268         __ ldrsh(result, mem_operand);
   3269         break;
   3270       case EXTERNAL_UINT16_ELEMENTS:
   3271       case UINT16_ELEMENTS:
   3272         __ ldrh(result, mem_operand);
   3273         break;
   3274       case EXTERNAL_INT32_ELEMENTS:
   3275       case INT32_ELEMENTS:
   3276         __ ldr(result, mem_operand);
   3277         break;
   3278       case EXTERNAL_UINT32_ELEMENTS:
   3279       case UINT32_ELEMENTS:
   3280         __ ldr(result, mem_operand);
   3281         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3282           __ cmp(result, Operand(0x80000000));
   3283           DeoptimizeIf(cs, instr);
   3284         }
   3285         break;
   3286       case FLOAT32_ELEMENTS:
   3287       case FLOAT64_ELEMENTS:
   3288       case EXTERNAL_FLOAT32_ELEMENTS:
   3289       case EXTERNAL_FLOAT64_ELEMENTS:
   3290       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3291       case FAST_HOLEY_ELEMENTS:
   3292       case FAST_HOLEY_SMI_ELEMENTS:
   3293       case FAST_DOUBLE_ELEMENTS:
   3294       case FAST_ELEMENTS:
   3295       case FAST_SMI_ELEMENTS:
   3296       case DICTIONARY_ELEMENTS:
   3297       case SLOPPY_ARGUMENTS_ELEMENTS:
   3298         UNREACHABLE();
   3299         break;
   3300     }
   3301   }
   3302 }
   3303 
   3304 
   3305 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   3306   Register elements = ToRegister(instr->elements());
   3307   bool key_is_constant = instr->key()->IsConstantOperand();
   3308   Register key = no_reg;
   3309   DwVfpRegister result = ToDoubleRegister(instr->result());
   3310   Register scratch = scratch0();
   3311 
   3312   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   3313 
   3314   int base_offset = instr->base_offset();
   3315   if (key_is_constant) {
   3316     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3317     if (constant_key & 0xF0000000) {
   3318       Abort(kArrayIndexConstantValueTooBig);
   3319     }
   3320     base_offset += constant_key * kDoubleSize;
   3321   }
   3322   __ add(scratch, elements, Operand(base_offset));
   3323 
   3324   if (!key_is_constant) {
   3325     key = ToRegister(instr->key());
   3326     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3327         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3328     __ add(scratch, scratch, Operand(key, LSL, shift_size));
   3329   }
   3330 
   3331   __ vldr(result, scratch, 0);
   3332 
   3333   if (instr->hydrogen()->RequiresHoleCheck()) {
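            // The hole is encoded as a NaN with a fixed bit pattern. scratch still
            // holds the address of the double that was just loaded, so reload its
            // upper word and compare it against kHoleNanUpper32.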
   3334     __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
   3335     __ cmp(scratch, Operand(kHoleNanUpper32));
   3336     DeoptimizeIf(eq, instr);
   3337   }
   3338 }
   3339 
   3340 
   3341 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   3342   Register elements = ToRegister(instr->elements());
   3343   Register result = ToRegister(instr->result());
   3344   Register scratch = scratch0();
   3345   Register store_base = scratch;
   3346   int offset = instr->base_offset();
   3347 
   3348   if (instr->key()->IsConstantOperand()) {
   3349     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3350     offset += ToInteger32(const_operand) * kPointerSize;
   3351     store_base = elements;
   3352   } else {
   3353     Register key = ToRegister(instr->key());
   3354     // Even though the HLoadKeyed instruction forces the input
   3355     // representation for the key to be an integer, the input gets replaced
   3356     // during bounds check elimination with the index argument to the bounds
   3357     // check, which can be tagged, so that case must be handled here, too.
   3358     if (instr->hydrogen()->key()->representation().IsSmi()) {
   3359       __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
   3360     } else {
   3361       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
   3362     }
   3363   }
   3364   __ ldr(result, MemOperand(store_base, offset));
   3365 
   3366   // Check for the hole value.
   3367   if (instr->hydrogen()->RequiresHoleCheck()) {
   3368     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3369       __ SmiTst(result);
   3370       DeoptimizeIf(ne, instr);
   3371     } else {
   3372       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3373       __ cmp(result, scratch);
   3374       DeoptimizeIf(eq, instr);
   3375     }
   3376   }
   3377 }
   3378 
   3379 
   3380 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3381   if (instr->is_typed_elements()) {
   3382     DoLoadKeyedExternalArray(instr);
   3383   } else if (instr->hydrogen()->representation().IsDouble()) {
   3384     DoLoadKeyedFixedDoubleArray(instr);
   3385   } else {
   3386     DoLoadKeyedFixedArray(instr);
   3387   }
   3388 }
   3389 
   3390 
   3391 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   3392                                          Register base,
   3393                                          bool key_is_constant,
   3394                                          int constant_key,
   3395                                          int element_size,
   3396                                          int shift_size,
   3397                                          int base_offset) {
   3398   if (key_is_constant) {
   3399     return MemOperand(base, (constant_key << element_size) + base_offset);
   3400   }
   3401 
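          // A shift_size of -1 means the key is still smi-tagged while the elements
          // are byte-sized: shifting the key right by one both untags and scales it.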
   3402   if (base_offset == 0) {
   3403     if (shift_size >= 0) {
   3404       return MemOperand(base, key, LSL, shift_size);
   3405     } else {
   3406       DCHECK_EQ(-1, shift_size);
   3407       return MemOperand(base, key, LSR, 1);
   3408     }
   3409   }
   3410 
   3411   if (shift_size >= 0) {
   3412     __ add(scratch0(), base, Operand(key, LSL, shift_size));
   3413     return MemOperand(scratch0(), base_offset);
   3414   } else {
   3415     DCHECK_EQ(-1, shift_size);
   3416     __ add(scratch0(), base, Operand(key, ASR, 1));
   3417     return MemOperand(scratch0(), base_offset);
   3418   }
   3419 }
   3420 
   3421 
   3422 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3423   DCHECK(ToRegister(instr->context()).is(cp));
   3424   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   3425   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
   3426 
   3427   if (FLAG_vector_ics) {
   3428     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
   3429   }
   3430 
   3431   Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   3432   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   3433 }
   3434 
   3435 
   3436 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3437   Register scratch = scratch0();
   3438   Register result = ToRegister(instr->result());
   3439 
   3440   if (instr->hydrogen()->from_inlined()) {
   3441     __ sub(result, sp, Operand(2 * kPointerSize));
   3442   } else {
   3443     // Check if the calling frame is an arguments adaptor frame.
   3444     Label done, adapted;
   3445     __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3446     __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
   3447     __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3448 
   3449     // Result is the frame pointer of this frame if not adapted, and the frame
   3450     // pointer of the real frame below the adaptor frame if adapted.
   3451     __ mov(result, fp, LeaveCC, ne);
   3452     __ mov(result, scratch, LeaveCC, eq);
   3453   }
   3454 }
   3455 
   3456 
   3457 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3458   Register elem = ToRegister(instr->elements());
   3459   Register result = ToRegister(instr->result());
   3460 
   3461   Label done;
   3462 
   3463   // If there is no arguments adaptor frame, the number of arguments is fixed.
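          // (DoArgumentsElements leaves fp itself in the elements register when there
          // is no adaptor frame, so equality below means the count is static.)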
   3464   __ cmp(fp, elem);
   3465   __ mov(result, Operand(scope()->num_parameters()));
   3466   __ b(eq, &done);
   3467 
   3468   // Arguments adaptor frame present. Get argument length from there.
   3469   __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3470   __ ldr(result,
   3471          MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3472   __ SmiUntag(result);
   3473 
   3474   // Argument length is in result register.
   3475   __ bind(&done);
   3476 }
   3477 
   3478 
   3479 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3480   Register receiver = ToRegister(instr->receiver());
   3481   Register function = ToRegister(instr->function());
   3482   Register result = ToRegister(instr->result());
   3483   Register scratch = scratch0();
   3484 
   3485   // If the receiver is null or undefined, we have to pass the global
   3486   // object as a receiver to normal functions. Values have to be
   3487   // passed unchanged to builtins and strict-mode functions.
   3488   Label global_object, result_in_receiver;
   3489 
   3490   if (!instr->hydrogen()->known_function()) {
   3491     // Do not transform the receiver to object for strict mode
   3492     // functions.
   3493     __ ldr(scratch,
   3494            FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3495     __ ldr(scratch,
   3496            FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
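            // The compiler hints field is stored as a smi, hence the kSmiTagSize
            // adjustment when picking the bits to test.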
   3497     int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
   3498     __ tst(scratch, Operand(mask));
   3499     __ b(ne, &result_in_receiver);
   3500 
   3501     // Do not transform the receiver to object for builtins.
   3502     __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
   3503     __ b(ne, &result_in_receiver);
   3504   }
   3505 
   3506   // Normal function. Replace undefined or null with global receiver.
   3507   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3508   __ cmp(receiver, scratch);
   3509   __ b(eq, &global_object);
   3510   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3511   __ cmp(receiver, scratch);
   3512   __ b(eq, &global_object);
   3513 
   3514   // Deoptimize if the receiver is not a JS object.
   3515   __ SmiTst(receiver);
   3516   DeoptimizeIf(eq, instr);
   3517   __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
   3518   DeoptimizeIf(lt, instr);
   3519 
   3520   __ b(&result_in_receiver);
   3521   __ bind(&global_object);
   3522   __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3523   __ ldr(result,
   3524          ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
   3525   __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
   3526 
   3527   if (result.is(receiver)) {
   3528     __ bind(&result_in_receiver);
   3529   } else {
   3530     Label result_ok;
   3531     __ b(&result_ok);
   3532     __ bind(&result_in_receiver);
   3533     __ mov(result, receiver);
   3534     __ bind(&result_ok);
   3535   }
   3536 }
   3537 
   3538 
   3539 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3540   Register receiver = ToRegister(instr->receiver());
   3541   Register function = ToRegister(instr->function());
   3542   Register length = ToRegister(instr->length());
   3543   Register elements = ToRegister(instr->elements());
   3544   Register scratch = scratch0();
   3545   DCHECK(receiver.is(r0));  // Used for parameter count.
   3546   DCHECK(function.is(r1));  // Required by InvokeFunction.
   3547   DCHECK(ToRegister(instr->result()).is(r0));
   3548 
   3549   // Copy the arguments to this function possibly from the
   3550   // adaptor frame below it.
   3551   const uint32_t kArgumentsLimit = 1 * KB;
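          // Deoptimize if there are too many arguments; this bounds the stack space
          // consumed by the push loop below.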
   3552   __ cmp(length, Operand(kArgumentsLimit));
   3553   DeoptimizeIf(hi, instr);
   3554 
   3555   // Push the receiver and use the register to keep the original
   3556   // number of arguments.
   3557   __ push(receiver);
   3558   __ mov(receiver, length);
   3559   // The arguments are at a one pointer size offset from elements.
   3560   __ add(elements, elements, Operand(1 * kPointerSize));
   3561 
   3562   // Loop through the arguments pushing them onto the execution
   3563   // stack.
   3564   Label invoke, loop;
   3565   // length is a small non-negative integer, due to the test above.
   3566   __ cmp(length, Operand::Zero());
   3567   __ b(eq, &invoke);
   3568   __ bind(&loop);
   3569   __ ldr(scratch, MemOperand(elements, length, LSL, 2));
   3570   __ push(scratch);
   3571   __ sub(length, length, Operand(1), SetCC);
   3572   __ b(ne, &loop);
   3573 
   3574   __ bind(&invoke);
   3575   DCHECK(instr->HasPointerMap());
   3576   LPointerMap* pointers = instr->pointer_map();
   3577   SafepointGenerator safepoint_generator(
   3578       this, pointers, Safepoint::kLazyDeopt);
   3579   // The number of arguments is stored in receiver which is r0, as expected
   3580   // by InvokeFunction.
   3581   ParameterCount actual(receiver);
   3582   __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
   3583 }
   3584 
   3585 
   3586 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3587   LOperand* argument = instr->value();
   3588   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3589     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3590   } else {
   3591     Register argument_reg = EmitLoadRegister(argument, ip);
   3592     __ push(argument_reg);
   3593   }
   3594 }
   3595 
   3596 
   3597 void LCodeGen::DoDrop(LDrop* instr) {
   3598   __ Drop(instr->count());
   3599 }
   3600 
   3601 
   3602 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3603   Register result = ToRegister(instr->result());
   3604   __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3605 }
   3606 
   3607 
   3608 void LCodeGen::DoContext(LContext* instr) {
   3609   // If there is a non-return use, the context must be moved to a register.
   3610   Register result = ToRegister(instr->result());
   3611   if (info()->IsOptimizing()) {
   3612     __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3613   } else {
   3614     // If there is no frame, the context must be in cp.
   3615     DCHECK(result.is(cp));
   3616   }
   3617 }
   3618 
   3619 
   3620 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3621   DCHECK(ToRegister(instr->context()).is(cp));
   3622   __ push(cp);  // The context is the first argument.
   3623   __ Move(scratch0(), instr->hydrogen()->pairs());
   3624   __ push(scratch0());
   3625   __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   3626   __ push(scratch0());
   3627   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
   3628 }
   3629 
   3630 
   3631 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3632                                  int formal_parameter_count,
   3633                                  int arity,
   3634                                  LInstruction* instr,
   3635                                  R1State r1_state) {
   3636   bool dont_adapt_arguments =
   3637       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3638   bool can_invoke_directly =
   3639       dont_adapt_arguments || formal_parameter_count == arity;
   3640 
   3641   LPointerMap* pointers = instr->pointer_map();
   3642 
   3643   if (can_invoke_directly) {
   3644     if (r1_state == R1_UNINITIALIZED) {
   3645       __ Move(r1, function);
   3646     }
   3647 
   3648     // Change context.
   3649     __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   3650 
   3651     // Set r0 to the arguments count if adaptation is not needed. Assumes that
   3652     // r0 is available to write to at this point.
   3653     if (dont_adapt_arguments) {
   3654       __ mov(r0, Operand(arity));
   3655     }
   3656 
   3657     // Invoke function.
   3658     __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   3659     __ Call(ip);
   3660 
   3661     // Set up deoptimization.
   3662     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3663   } else {
   3664     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3665     ParameterCount count(arity);
   3666     ParameterCount expected(formal_parameter_count);
   3667     __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
   3668   }
   3669 }
   3670 
   3671 
   3672 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3673   DCHECK(instr->context() != NULL);
   3674   DCHECK(ToRegister(instr->context()).is(cp));
   3675   Register input = ToRegister(instr->value());
   3676   Register result = ToRegister(instr->result());
   3677   Register scratch = scratch0();
   3678 
   3679   // Deoptimize if not a heap number.
   3680   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3681   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   3682   __ cmp(scratch, Operand(ip));
   3683   DeoptimizeIf(ne, instr);
   3684 
   3685   Label done;
   3686   Register exponent = scratch0();
   3687   scratch = no_reg;
   3688   __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3689   // Check the sign of the argument. If the argument is positive, just
   3690   // return it.
   3691   __ tst(exponent, Operand(HeapNumber::kSignMask));
   3692   // Move the input to the result if necessary.
   3693   __ Move(result, input);
   3694   __ b(eq, &done);
   3695 
   3696   // Input is negative. Reverse its sign.
   3697   // Preserve the value of all registers.
   3698   {
   3699     PushSafepointRegistersScope scope(this);
   3700 
   3701     // Registers were saved at the safepoint, so we can use
   3702     // many scratch registers.
   3703     Register tmp1 = input.is(r1) ? r0 : r1;
   3704     Register tmp2 = input.is(r2) ? r0 : r2;
   3705     Register tmp3 = input.is(r3) ? r0 : r3;
   3706     Register tmp4 = input.is(r4) ? r0 : r4;
   3707 
   3708     // exponent: floating point exponent value.
   3709 
   3710     Label allocated, slow;
   3711     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3712     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3713     __ b(&allocated);
   3714 
   3715     // Slow case: Call the runtime system to do the number allocation.
   3716     __ bind(&slow);
   3717 
   3718     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3719                             instr->context());
   3720     // Set the pointer to the new heap number in tmp1.
   3721     if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
   3722     // Restore input_reg after call to runtime.
   3723     __ LoadFromSafepointRegisterSlot(input, input);
   3724     __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3725 
   3726     __ bind(&allocated);
   3727     // exponent: floating point exponent value.
   3728     // tmp1: allocated heap number.
   3729     __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
   3730     __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3731     __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3732     __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3733 
   3734     __ StoreToSafepointRegisterSlot(tmp1, result);
   3735   }
   3736 
   3737   __ bind(&done);
   3738 }
   3739 
   3740 
   3741 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3742   Register input = ToRegister(instr->value());
   3743   Register result = ToRegister(instr->result());
   3744   __ cmp(input, Operand::Zero());
   3745   __ Move(result, input, pl);
   3746   // We can make rsb conditional because the previous cmp instruction
   3747   // will clear the V (overflow) flag and rsb won't set this flag
   3748   // if input is positive.
   3749   __ rsb(result, input, Operand::Zero(), SetCC, mi);
   3750   // Deoptimize on overflow.
   3751   DeoptimizeIf(vs, instr);
   3752 }
   3753 
   3754 
   3755 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3756   // Class for deferred case.
   3757   class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
   3758    public:
   3759     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3760         : LDeferredCode(codegen), instr_(instr) { }
   3761     virtual void Generate() OVERRIDE {
   3762       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3763     }
   3764     virtual LInstruction* instr() OVERRIDE { return instr_; }
   3765    private:
   3766     LMathAbs* instr_;
   3767   };
   3768 
   3769   Representation r = instr->hydrogen()->value()->representation();
   3770   if (r.IsDouble()) {
   3771     DwVfpRegister input = ToDoubleRegister(instr->value());
   3772     DwVfpRegister result = ToDoubleRegister(instr->result());
   3773     __ vabs(result, input);
   3774   } else if (r.IsSmiOrInteger32()) {
   3775     EmitIntegerMathAbs(instr);
   3776   } else {
   3777     // Representation is tagged.
   3778     DeferredMathAbsTaggedHeapNumber* deferred =
   3779         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3780     Register input = ToRegister(instr->value());
   3781     // Smi check.
   3782     __ JumpIfNotSmi(input, deferred->entry());
   3783     // If smi, handle it directly.
   3784     EmitIntegerMathAbs(instr);
   3785     __ bind(deferred->exit());
   3786   }
   3787 }
   3788 
   3789 
   3790 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3791   DwVfpRegister input = ToDoubleRegister(instr->value());
   3792   Register result = ToRegister(instr->result());
   3793   Register input_high = scratch0();
   3794   Label done, exact;
   3795 
   3796   __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
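          // TryInt32Floor branches to &done or &exact on success; falling through
          // means the value cannot be represented as an int32, so always deopt.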
   3797   DeoptimizeIf(al, instr);
   3798 
   3799   __ bind(&exact);
   3800   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3801     // Test for -0.
   3802     __ cmp(result, Operand::Zero());
   3803     __ b(ne, &done);
   3804     __ cmp(input_high, Operand::Zero());
   3805     DeoptimizeIf(mi, instr);
   3806   }
   3807   __ bind(&done);
   3808 }
   3809 
   3810 
   3811 void LCodeGen::DoMathRound(LMathRound* instr) {
   3812   DwVfpRegister input = ToDoubleRegister(instr->value());
   3813   Register result = ToRegister(instr->result());
   3814   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3815   DwVfpRegister input_plus_dot_five = double_scratch1;
   3816   Register input_high = scratch0();
   3817   DwVfpRegister dot_five = double_scratch0();
   3818   Label convert, done;
   3819 
   3820   __ Vmov(dot_five, 0.5, scratch0());
   3821   __ vabs(double_scratch1, input);
   3822   __ VFPCompareAndSetFlags(double_scratch1, dot_five);
   3823   // If input is in [-0.5, -0], the result is -0.
   3824   // If input is in [+0, +0.5[, the result is +0.
   3825   // If the input is +0.5, the result is 1.
   3826   __ b(hi, &convert);  // Out of [-0.5, +0.5].
   3827   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3828     __ VmovHigh(input_high, input);
   3829     __ cmp(input_high, Operand::Zero());
   3830     DeoptimizeIf(mi, instr);  // [-0.5, -0].
   3831   }
   3832   __ VFPCompareAndSetFlags(input, dot_five);
   3833   __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
   3834   // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
   3835   // flag kBailoutOnMinusZero.
   3836   __ mov(result, Operand::Zero(), LeaveCC, ne);
   3837   __ b(&done);
   3838 
   3839   __ bind(&convert);
   3840   __ vadd(input_plus_dot_five, input, dot_five);
   3841   // Reuse dot_five (double_scratch0) as we no longer need this value.
   3842   __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
   3843                    &done, &done);
   3844   DeoptimizeIf(al, instr);
   3845   __ bind(&done);
   3846 }
   3847 
   3848 
   3849 void LCodeGen::DoMathFround(LMathFround* instr) {
   3850   DwVfpRegister input_reg = ToDoubleRegister(instr->value());
   3851   DwVfpRegister output_reg = ToDoubleRegister(instr->result());
   3852   LowDwVfpRegister scratch = double_scratch0();
   3853   __ vcvt_f32_f64(scratch.low(), input_reg);
   3854   __ vcvt_f64_f32(output_reg, scratch.low());
   3855 }
   3856 
   3857 
   3858 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3859   DwVfpRegister input = ToDoubleRegister(instr->value());
   3860   DwVfpRegister result = ToDoubleRegister(instr->result());
   3861   __ vsqrt(result, input);
   3862 }
   3863 
   3864 
   3865 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3866   DwVfpRegister input = ToDoubleRegister(instr->value());
   3867   DwVfpRegister result = ToDoubleRegister(instr->result());
   3868   DwVfpRegister temp = double_scratch0();
   3869 
   3870   // Note that according to ECMA-262 15.8.2.13:
   3871   // Math.pow(-Infinity, 0.5) == Infinity
   3872   // Math.sqrt(-Infinity) == NaN
   3873   Label done;
   3874   __ vmov(temp, -V8_INFINITY, scratch0());
   3875   __ VFPCompareAndSetFlags(input, temp);
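          // If the input is -Infinity, the result is +Infinity, i.e. the negation of
          // temp; otherwise fall through to the sqrt path below.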
   3876   __ vneg(result, temp, eq);
   3877   __ b(&done, eq);
   3878 
   3879   // Add +0 to convert -0 to +0.
   3880   __ vadd(result, input, kDoubleRegZero);
   3881   __ vsqrt(result, result);
   3882   __ bind(&done);
   3883 }
   3884 
   3885 
   3886 void LCodeGen::DoPower(LPower* instr) {
   3887   Representation exponent_type = instr->hydrogen()->right()->representation();
   3888   // Having marked this as a call, we can use any registers.
   3889   // Just make sure that the input/output registers are the expected ones.
   3890   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3891   DCHECK(!instr->right()->IsDoubleRegister() ||
   3892          ToDoubleRegister(instr->right()).is(d1));
   3893   DCHECK(!instr->right()->IsRegister() ||
   3894          ToRegister(instr->right()).is(tagged_exponent));
   3895   DCHECK(ToDoubleRegister(instr->left()).is(d0));
   3896   DCHECK(ToDoubleRegister(instr->result()).is(d2));
   3897 
   3898   if (exponent_type.IsSmi()) {
   3899     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3900     __ CallStub(&stub);
   3901   } else if (exponent_type.IsTagged()) {
   3902     Label no_deopt;
   3903     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3904     DCHECK(!r6.is(tagged_exponent));
   3905     __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
   3906     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   3907     __ cmp(r6, Operand(ip));
   3908     DeoptimizeIf(ne, instr);
   3909     __ bind(&no_deopt);
   3910     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3911     __ CallStub(&stub);
   3912   } else if (exponent_type.IsInteger32()) {
   3913     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3914     __ CallStub(&stub);
   3915   } else {
   3916     DCHECK(exponent_type.IsDouble());
   3917     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3918     __ CallStub(&stub);
   3919   }
   3920 }
   3921 
   3922 
   3923 void LCodeGen::DoMathExp(LMathExp* instr) {
   3924   DwVfpRegister input = ToDoubleRegister(instr->value());
   3925   DwVfpRegister result = ToDoubleRegister(instr->result());
   3926   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
   3927   DwVfpRegister double_scratch2 = double_scratch0();
   3928   Register temp1 = ToRegister(instr->temp1());
   3929   Register temp2 = ToRegister(instr->temp2());
   3930 
   3931   MathExpGenerator::EmitMathExp(
   3932       masm(), input, result, double_scratch1, double_scratch2,
   3933       temp1, temp2, scratch0());
   3934 }
   3935 
   3936 
   3937 void LCodeGen::DoMathLog(LMathLog* instr) {
   3938   __ PrepareCallCFunction(0, 1, scratch0());
   3939   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3940   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
   3941                    0, 1);
   3942   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3943 }
   3944 
   3945 
   3946 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3947   Register input = ToRegister(instr->value());
   3948   Register result = ToRegister(instr->result());
   3949   __ clz(result, input);
   3950 }
   3951 
   3952 
   3953 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3954   DCHECK(ToRegister(instr->context()).is(cp));
   3955   DCHECK(ToRegister(instr->function()).is(r1));
   3956   DCHECK(instr->HasPointerMap());
   3957 
   3958   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3959   if (known_function.is_null()) {
   3960     LPointerMap* pointers = instr->pointer_map();
   3961     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3962     ParameterCount count(instr->arity());
   3963     __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
   3964   } else {
   3965     CallKnownFunction(known_function,
   3966                       instr->hydrogen()->formal_parameter_count(),
   3967                       instr->arity(),
   3968                       instr,
   3969                       R1_CONTAINS_TARGET);
   3970   }
   3971 }
   3972 
   3973 
   3974 void LCodeGen::DoTailCallThroughMegamorphicCache(
   3975     LTailCallThroughMegamorphicCache* instr) {
   3976   Register receiver = ToRegister(instr->receiver());
   3977   Register name = ToRegister(instr->name());
   3978   DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
   3979   DCHECK(name.is(LoadDescriptor::NameRegister()));
   3980   DCHECK(receiver.is(r1));
   3981   DCHECK(name.is(r2));
   3982 
   3983   Register scratch = r3;
   3984   Register extra = r4;
   3985   Register extra2 = r5;
   3986   Register extra3 = r6;
   3987 
   3988   // Important for the tail-call.
   3989   bool must_teardown_frame = NeedsEagerFrame();
   3990 
   3991   // The probe will tail call to a handler if found.
   3992   isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
   3993                                          must_teardown_frame, receiver, name,
   3994                                          scratch, extra, extra2, extra3);
   3995 
   3996   // Tail call to miss if we ended up here.
   3997   if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
   3998   LoadIC::GenerateMiss(masm());
   3999 }
   4000 
   4001 
   4002 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   4003   DCHECK(ToRegister(instr->result()).is(r0));
   4004 
   4005   LPointerMap* pointers = instr->pointer_map();
   4006   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   4007 
   4008   if (instr->target()->IsConstantOperand()) {
   4009     LConstantOperand* target = LConstantOperand::cast(instr->target());
   4010     Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   4011     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   4012     PlatformInterfaceDescriptor* call_descriptor =
   4013         instr->descriptor().platform_specific_descriptor();
   4014     __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
   4015             call_descriptor->storage_mode());
   4016   } else {
   4017     DCHECK(instr->target()->IsRegister());
   4018     Register target = ToRegister(instr->target());
   4019     generator.BeforeCall(__ CallSize(target));
   4020     // Make sure we don't emit any additional entries in the constant pool
   4021     // before the call, to ensure that CallSize() calculated the correct
   4022     // number of instructions for the constant pool load.
   4023     {
   4024       ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
   4025       __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   4026     }
   4027     __ Call(target);
   4028   }
   4029   generator.AfterCall();
   4030 }
   4031 
   4032 
   4033 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   4034   DCHECK(ToRegister(instr->function()).is(r1));
   4035   DCHECK(ToRegister(instr->result()).is(r0));
   4036 
   4037   if (instr->hydrogen()->pass_argument_count()) {
   4038     __ mov(r0, Operand(instr->arity()));
   4039   }
   4040 
   4041   // Change context.
   4042   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   4043 
   4044   // Load the code entry address
   4045   __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   4046   __ Call(ip);
   4047 
   4048   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   4049 }
   4050 
   4051 
   4052 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   4053   DCHECK(ToRegister(instr->context()).is(cp));
   4054   DCHECK(ToRegister(instr->function()).is(r1));
   4055   DCHECK(ToRegister(instr->result()).is(r0));
   4056 
   4057   int arity = instr->arity();
   4058   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
   4059   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4060 }
   4061 
   4062 
   4063 void LCodeGen::DoCallNew(LCallNew* instr) {
   4064   DCHECK(ToRegister(instr->context()).is(cp));
   4065   DCHECK(ToRegister(instr->constructor()).is(r1));
   4066   DCHECK(ToRegister(instr->result()).is(r0));
   4067 
   4068   __ mov(r0, Operand(instr->arity()));
   4069   // No cell in r2 for construct type feedback in optimized code.
   4070   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
   4071   CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
   4072   CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4073 }
   4074 
   4075 
   4076 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   4077   DCHECK(ToRegister(instr->context()).is(cp));
   4078   DCHECK(ToRegister(instr->constructor()).is(r1));
   4079   DCHECK(ToRegister(instr->result()).is(r0));
   4080 
   4081   __ mov(r0, Operand(instr->arity()));
   4082   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
   4083   ElementsKind kind = instr->hydrogen()->elements_kind();
   4084   AllocationSiteOverrideMode override_mode =
   4085       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   4086           ? DISABLE_ALLOCATION_SITES
   4087           : DONT_OVERRIDE;
   4088 
   4089   if (instr->arity() == 0) {
   4090     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   4091     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4092   } else if (instr->arity() == 1) {
   4093     Label done;
   4094     if (IsFastPackedElementsKind(kind)) {
   4095       Label packed_case;
   4096       // We might need to create a holey array instead; look at the
   4097       // first argument (the length) to decide.
   4098       __ ldr(r5, MemOperand(sp, 0));
   4099       __ cmp(r5, Operand::Zero());
   4100       __ b(eq, &packed_case);
   4101 
   4102       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   4103       ArraySingleArgumentConstructorStub stub(isolate(),
   4104                                               holey_kind,
   4105                                               override_mode);
   4106       CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4107       __ jmp(&done);
   4108       __ bind(&packed_case);
   4109     }
   4110 
   4111     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   4112     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4113     __ bind(&done);
   4114   } else {
   4115     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
   4116     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4117   }
   4118 }
   4119 
   4120 
   4121 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   4122   CallRuntime(instr->function(), instr->arity(), instr);
   4123 }
   4124 
   4125 
   4126 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4127   Register function = ToRegister(instr->function());
   4128   Register code_object = ToRegister(instr->code_object());
   4129   __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
   4130   __ str(code_object,
   4131          FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   4132 }
   4133 
   4134 
   4135 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   4136   Register result = ToRegister(instr->result());
   4137   Register base = ToRegister(instr->base_object());
   4138   if (instr->offset()->IsConstantOperand()) {
   4139     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   4140     __ add(result, base, Operand(ToInteger32(offset)));
   4141   } else {
   4142     Register offset = ToRegister(instr->offset());
   4143     __ add(result, base, offset);
   4144   }
   4145 }
   4146 
   4147 
   4148 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4149   Representation representation = instr->representation();
   4150 
   4151   Register object = ToRegister(instr->object());
   4152   Register scratch = scratch0();
   4153   HObjectAccess access = instr->hydrogen()->access();
   4154   int offset = access.offset();
   4155 
   4156   if (access.IsExternalMemory()) {
   4157     Register value = ToRegister(instr->value());
   4158     MemOperand operand = MemOperand(object, offset);
   4159     __ Store(value, operand, representation);
   4160     return;
   4161   }
   4162 
   4163   __ AssertNotSmi(object);
   4164 
   4165   DCHECK(!representation.IsSmi() ||
   4166          !instr->value()->IsConstantOperand() ||
   4167          IsSmi(LConstantOperand::cast(instr->value())));
   4168   if (representation.IsDouble()) {
   4169     DCHECK(access.IsInobject());
   4170     DCHECK(!instr->hydrogen()->has_transition());
   4171     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   4172     DwVfpRegister value = ToDoubleRegister(instr->value());
   4173     __ vstr(value, FieldMemOperand(object, offset));
   4174     return;
   4175   }
   4176 
   4177   if (instr->hydrogen()->has_transition()) {
   4178     Handle<Map> transition = instr->hydrogen()->transition_map();
   4179     AddDeprecationDependency(transition);
   4180     __ mov(scratch, Operand(transition));
   4181     __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   4182     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   4183       Register temp = ToRegister(instr->temp());
   4184       // Update the write barrier for the map field.
   4185       __ RecordWriteForMap(object,
   4186                            scratch,
   4187                            temp,
   4188                            GetLinkRegisterState(),
   4189                            kSaveFPRegs);
   4190     }
   4191   }
   4192 
   4193   // Do the store.
   4194   Register value = ToRegister(instr->value());
   4195   if (access.IsInobject()) {
   4196     MemOperand operand = FieldMemOperand(object, offset);
   4197     __ Store(value, operand, representation);
   4198     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4199       // Update the write barrier for the object for in-object properties.
   4200       __ RecordWriteField(object,
   4201                           offset,
   4202                           value,
   4203                           scratch,
   4204                           GetLinkRegisterState(),
   4205                           kSaveFPRegs,
   4206                           EMIT_REMEMBERED_SET,
   4207                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   4208                           instr->hydrogen()->PointersToHereCheckForValue());
   4209     }
   4210   } else {
   4211     __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   4212     MemOperand operand = FieldMemOperand(scratch, offset);
   4213     __ Store(value, operand, representation);
   4214     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4215       // Update the write barrier for the properties array.
   4216       // object is used as a scratch register.
   4217       __ RecordWriteField(scratch,
   4218                           offset,
   4219                           value,
   4220                           object,
   4221                           GetLinkRegisterState(),
   4222                           kSaveFPRegs,
   4223                           EMIT_REMEMBERED_SET,
   4224                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   4225                           instr->hydrogen()->PointersToHereCheckForValue());
   4226     }
   4227   }
   4228 }
   4229 
   4230 
   4231 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4232   DCHECK(ToRegister(instr->context()).is(cp));
   4233   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   4234   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   4235 
   4236   __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
   4237   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   4238   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   4239 }
   4240 
   4241 
   4242 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
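          // The check fails when index > length if equality is allowed, or when
          // index >= length otherwise; cc is commuted below when the constant
          // operand forces the comparison to be done in the reverse order.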
   4243   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   4244   if (instr->index()->IsConstantOperand()) {
   4245     Operand index = ToOperand(instr->index());
   4246     Register length = ToRegister(instr->length());
   4247     __ cmp(length, index);
   4248     cc = CommuteCondition(cc);
   4249   } else {
   4250     Register index = ToRegister(instr->index());
   4251     Operand length = ToOperand(instr->length());
   4252     __ cmp(index, length);
   4253   }
   4254   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   4255     Label done;
   4256     __ b(NegateCondition(cc), &done);
   4257     __ stop("eliminated bounds check failed");
   4258     __ bind(&done);
   4259   } else {
   4260     DeoptimizeIf(cc, instr);
   4261   }
   4262 }
   4263 
   4264 
   4265 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4266   Register external_pointer = ToRegister(instr->elements());
   4267   Register key = no_reg;
   4268   ElementsKind elements_kind = instr->elements_kind();
   4269   bool key_is_constant = instr->key()->IsConstantOperand();
   4270   int constant_key = 0;
   4271   if (key_is_constant) {
   4272     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4273     if (constant_key & 0xF0000000) {
   4274       Abort(kArrayIndexConstantValueTooBig);
   4275     }
   4276   } else {
   4277     key = ToRegister(instr->key());
   4278   }
   4279   int element_size_shift = ElementsKindToShiftSize(elements_kind);
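          // A smi key is already shifted left by one (the smi tag), so it needs
          // one bit less of scaling than an untagged integer key.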
   4280   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4281       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4282   int base_offset = instr->base_offset();
   4283 
   4284   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   4285       elements_kind == FLOAT32_ELEMENTS ||
   4286       elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
   4287       elements_kind == FLOAT64_ELEMENTS) {
   4288     Register address = scratch0();
   4289     DwVfpRegister value(ToDoubleRegister(instr->value()));
   4290     if (key_is_constant) {
   4291       if (constant_key != 0) {
   4292         __ add(address, external_pointer,
   4293                Operand(constant_key << element_size_shift));
   4294       } else {
   4295         address = external_pointer;
   4296       }
   4297     } else {
   4298       __ add(address, external_pointer, Operand(key, LSL, shift_size));
   4299     }
   4300     if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   4301         elements_kind == FLOAT32_ELEMENTS) {
   4302       __ vcvt_f32_f64(double_scratch0().low(), value);
   4303       __ vstr(double_scratch0().low(), address, base_offset);
   4304     } else {  // Storing doubles, not floats.
   4305       __ vstr(value, address, base_offset);
   4306     }
   4307   } else {
   4308     Register value(ToRegister(instr->value()));
   4309     MemOperand mem_operand = PrepareKeyedOperand(
   4310         key, external_pointer, key_is_constant, constant_key,
   4311         element_size_shift, shift_size,
   4312         base_offset);
   4313     switch (elements_kind) {
   4314       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
   4315       case EXTERNAL_INT8_ELEMENTS:
   4316       case EXTERNAL_UINT8_ELEMENTS:
   4317       case UINT8_ELEMENTS:
   4318       case UINT8_CLAMPED_ELEMENTS:
   4319       case INT8_ELEMENTS:
   4320         __ strb(value, mem_operand);
   4321         break;
   4322       case EXTERNAL_INT16_ELEMENTS:
   4323       case EXTERNAL_UINT16_ELEMENTS:
   4324       case INT16_ELEMENTS:
   4325       case UINT16_ELEMENTS:
   4326         __ strh(value, mem_operand);
   4327         break;
   4328       case EXTERNAL_INT32_ELEMENTS:
   4329       case EXTERNAL_UINT32_ELEMENTS:
   4330       case INT32_ELEMENTS:
   4331       case UINT32_ELEMENTS:
   4332         __ str(value, mem_operand);
   4333         break;
   4334       case FLOAT32_ELEMENTS:
   4335       case FLOAT64_ELEMENTS:
   4336       case EXTERNAL_FLOAT32_ELEMENTS:
   4337       case EXTERNAL_FLOAT64_ELEMENTS:
   4338       case FAST_DOUBLE_ELEMENTS:
   4339       case FAST_ELEMENTS:
   4340       case FAST_SMI_ELEMENTS:
   4341       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4342       case FAST_HOLEY_ELEMENTS:
   4343       case FAST_HOLEY_SMI_ELEMENTS:
   4344       case DICTIONARY_ELEMENTS:
   4345       case SLOPPY_ARGUMENTS_ELEMENTS:
   4346         UNREACHABLE();
   4347         break;
   4348     }
   4349   }
   4350 }
   4351 
   4352 
   4353 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4354   DwVfpRegister value = ToDoubleRegister(instr->value());
   4355   Register elements = ToRegister(instr->elements());
   4356   Register scratch = scratch0();
   4357   DwVfpRegister double_scratch = double_scratch0();
   4358   bool key_is_constant = instr->key()->IsConstantOperand();
   4359   int base_offset = instr->base_offset();
   4360 
   4361   // Calculate the effective address of the slot in the array to store the
   4362   // double value.
   4363   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4364   if (key_is_constant) {
   4365     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4366     if (constant_key & 0xF0000000) {
   4367       Abort(kArrayIndexConstantValueTooBig);
   4368     }
   4369     __ add(scratch, elements,
   4370            Operand((constant_key << element_size_shift) + base_offset));
   4371   } else {
   4372     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4373         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4374     __ add(scratch, elements, Operand(base_offset));
   4375     __ add(scratch, scratch,
   4376            Operand(ToRegister(instr->key()), LSL, shift_size));
   4377   }
   4378 
   4379   if (instr->NeedsCanonicalization()) {
   4380     // Force a canonical NaN.
   4381     if (masm()->emit_debug_code()) {
   4382       __ vmrs(ip);
   4383       __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
   4384       __ Assert(ne, kDefaultNaNModeNotSet);
   4385     }
   4386     __ VFPCanonicalizeNaN(double_scratch, value);
   4387     __ vstr(double_scratch, scratch, 0);
   4388   } else {
   4389     __ vstr(value, scratch, 0);
   4390   }
   4391 }
   4392 
   4393 
   4394 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4395   Register value = ToRegister(instr->value());
   4396   Register elements = ToRegister(instr->elements());
   4397   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
   4398       : no_reg;
   4399   Register scratch = scratch0();
   4400   Register store_base = scratch;
   4401   int offset = instr->base_offset();
   4402 
   4403   // Do the store.
   4404   if (instr->key()->IsConstantOperand()) {
   4405     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   4406     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4407     offset += ToInteger32(const_operand) * kPointerSize;
   4408     store_base = elements;
   4409   } else {
   4410     // Even though the HLoadKeyed instruction forces the input
   4411     // representation for the key to be an integer, the input gets replaced
   4412     // during bound check elimination with the index argument to the bounds
   4413     // check, which can be tagged, so that case must be handled here, too.
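            // A smi key already carries one implicit left shift from its tag, so
            // PointerOffsetFromSmiKey scales it by correspondingly fewer bits.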
   4414     if (instr->hydrogen()->key()->representation().IsSmi()) {
   4415       __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
   4416     } else {
   4417       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
   4418     }
   4419   }
   4420   __ str(value, MemOperand(store_base, offset));
   4421 
   4422   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4423     SmiCheck check_needed =
   4424         instr->hydrogen()->value()->type().IsHeapObject()
   4425             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4426     // Compute address of modified element and store it into key register.
   4427     __ add(key, store_base, Operand(offset));
   4428     __ RecordWrite(elements,
   4429                    key,
   4430                    value,
   4431                    GetLinkRegisterState(),
   4432                    kSaveFPRegs,
   4433                    EMIT_REMEMBERED_SET,
   4434                    check_needed,
   4435                    instr->hydrogen()->PointersToHereCheckForValue());
   4436   }
   4437 }
   4438 
   4439 
   4440 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4441   // By cases: typed/external, fast double, or fast tagged elements.
   4442   if (instr->is_typed_elements()) {
   4443     DoStoreKeyedExternalArray(instr);
   4444   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4445     DoStoreKeyedFixedDoubleArray(instr);
   4446   } else {
   4447     DoStoreKeyedFixedArray(instr);
   4448   }
   4449 }
   4450 
   4451 
   4452 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4453   DCHECK(ToRegister(instr->context()).is(cp));
   4454   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   4455   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   4456   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   4457 
   4458   Handle<Code> ic =
   4459       CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   4460   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   4461 }
   4462 
   4463 
   4464 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4465   Register object_reg = ToRegister(instr->object());
   4466   Register scratch = scratch0();
   4467 
   4468   Handle<Map> from_map = instr->original_map();
   4469   Handle<Map> to_map = instr->transitioned_map();
   4470   ElementsKind from_kind = instr->from_kind();
   4471   ElementsKind to_kind = instr->to_kind();
   4472 
   4473   Label not_applicable;
   4474   __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4475   __ cmp(scratch, Operand(from_map));
   4476   __ b(ne, &not_applicable);
   4477 
   4478   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4479     Register new_map_reg = ToRegister(instr->new_map_temp());
   4480     __ mov(new_map_reg, Operand(to_map));
   4481     __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4482     // Write barrier.
   4483     __ RecordWriteForMap(object_reg,
   4484                          new_map_reg,
   4485                          scratch,
   4486                          GetLinkRegisterState(),
   4487                          kDontSaveFPRegs);
   4488   } else {
   4489     DCHECK(ToRegister(instr->context()).is(cp));
   4490     DCHECK(object_reg.is(r0));
   4491     PushSafepointRegistersScope scope(this);
   4492     __ Move(r1, to_map);
   4493     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
   4494     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
   4495     __ CallStub(&stub);
   4496     RecordSafepointWithRegisters(
   4497         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   4498   }
   4499   __ bind(&not_applicable);
   4500 }
   4501 
   4502 
   4503 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4504   Register object = ToRegister(instr->object());
   4505   Register temp = ToRegister(instr->temp());
   4506   Label no_memento_found;
   4507   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   4508   DeoptimizeIf(eq, instr);
   4509   __ bind(&no_memento_found);
   4510 }
   4511 
   4512 
   4513 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4514   DCHECK(ToRegister(instr->context()).is(cp));
   4515   DCHECK(ToRegister(instr->left()).is(r1));
   4516   DCHECK(ToRegister(instr->right()).is(r0));
   4517   StringAddStub stub(isolate(),
   4518                      instr->hydrogen()->flags(),
   4519                      instr->hydrogen()->pretenure_flag());
   4520   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4521 }
   4522 
   4523 
   4524 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4525   class DeferredStringCharCodeAt FINAL : public LDeferredCode {
   4526    public:
   4527     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4528         : LDeferredCode(codegen), instr_(instr) { }
   4529     virtual void Generate() OVERRIDE {
   4530       codegen()->DoDeferredStringCharCodeAt(instr_);
   4531     }
   4532     virtual LInstruction* instr() OVERRIDE { return instr_; }
   4533    private:
   4534     LStringCharCodeAt* instr_;
   4535   };
   4536 
   4537   DeferredStringCharCodeAt* deferred =
   4538       new(zone()) DeferredStringCharCodeAt(this, instr);
   4539 
   4540   StringCharLoadGenerator::Generate(masm(),
   4541                                     ToRegister(instr->string()),
   4542                                     ToRegister(instr->index()),
   4543                                     ToRegister(instr->result()),
   4544                                     deferred->entry());
   4545   __ bind(deferred->exit());
   4546 }
   4547 
   4548 
   4549 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4550   Register string = ToRegister(instr->string());
   4551   Register result = ToRegister(instr->result());
   4552   Register scratch = scratch0();
   4553 
   4554   // TODO(3095996): Get rid of this. For now, we need to make the
   4555   // result register contain a valid pointer because it is already
   4556   // contained in the register pointer map.
   4557   __ mov(result, Operand::Zero());
   4558 
   4559   PushSafepointRegistersScope scope(this);
   4560   __ push(string);
   4561   // Push the index as a smi. This is safe because of the checks in
   4562   // DoStringCharCodeAt above.
   4563   if (instr->index()->IsConstantOperand()) {
   4564     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4565     __ mov(scratch, Operand(Smi::FromInt(const_index)));
   4566     __ push(scratch);
   4567   } else {
   4568     Register index = ToRegister(instr->index());
   4569     __ SmiTag(index);
   4570     __ push(index);
   4571   }
   4572   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   4573                           instr->context());
   4574   __ AssertSmi(r0);
   4575   __ SmiUntag(r0);
   4576   __ StoreToSafepointRegisterSlot(r0, result);
   4577 }
   4578 
   4579 
   4580 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4581   class DeferredStringCharFromCode FINAL : public LDeferredCode {
   4582    public:
   4583     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4584         : LDeferredCode(codegen), instr_(instr) { }
   4585     virtual void Generate() OVERRIDE {
   4586       codegen()->DoDeferredStringCharFromCode(instr_);
   4587     }
   4588     virtual LInstruction* instr() OVERRIDE { return instr_; }
   4589    private:
   4590     LStringCharFromCode* instr_;
   4591   };
   4592 
   4593   DeferredStringCharFromCode* deferred =
   4594       new(zone()) DeferredStringCharFromCode(this, instr);
   4595 
   4596   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4597   Register char_code = ToRegister(instr->char_code());
   4598   Register result = ToRegister(instr->result());
   4599   DCHECK(!char_code.is(result));
   4600 
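          // Fast path: look the code up in the single-character string cache. An
          // undefined cache entry means a miss, so fall through to the deferred
          // runtime call in that case as well.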
   4601   __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
   4602   __ b(hi, deferred->entry());
   4603   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4604   __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
   4605   __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4606   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4607   __ cmp(result, ip);
   4608   __ b(eq, deferred->entry());
   4609   __ bind(deferred->exit());
   4610 }
   4611 
   4612 
   4613 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4614   Register char_code = ToRegister(instr->char_code());
   4615   Register result = ToRegister(instr->result());
   4616 
   4617   // TODO(3095996): Get rid of this. For now, we need to make the
   4618   // result register contain a valid pointer because it is already
   4619   // contained in the register pointer map.
   4620   __ mov(result, Operand::Zero());
   4621 
   4622   PushSafepointRegistersScope scope(this);
   4623   __ SmiTag(char_code);
   4624   __ push(char_code);
   4625   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   4626   __ StoreToSafepointRegisterSlot(r0, result);
   4627 }
   4628 
   4629 
   4630 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4631   LOperand* input = instr->value();
   4632   DCHECK(input->IsRegister() || input->IsStackSlot());
   4633   LOperand* output = instr->result();
   4634   DCHECK(output->IsDoubleRegister());
   4635   SwVfpRegister single_scratch = double_scratch0().low();
   4636   if (input->IsStackSlot()) {
   4637     Register scratch = scratch0();
   4638     __ ldr(scratch, ToMemOperand(input));
   4639     __ vmov(single_scratch, scratch);
   4640   } else {
   4641     __ vmov(single_scratch, ToRegister(input));
   4642   }
   4643   __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
   4644 }
   4645 
   4646 
   4647 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4648   LOperand* input = instr->value();
   4649   LOperand* output = instr->result();
   4650 
   4651   SwVfpRegister flt_scratch = double_scratch0().low();
   4652   __ vmov(flt_scratch, ToRegister(input));
   4653   __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
   4654 }
   4655 
   4656 
   4657 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4658   class DeferredNumberTagI FINAL : public LDeferredCode {
   4659    public:
   4660     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4661         : LDeferredCode(codegen), instr_(instr) { }
   4662     virtual void Generate() OVERRIDE {
   4663       codegen()->DoDeferredNumberTagIU(instr_,
   4664                                        instr_->value(),
   4665                                        instr_->temp1(),
   4666                                        instr_->temp2(),
   4667                                        SIGNED_INT32);
   4668     }
   4669     virtual LInstruction* instr() OVERRIDE { return instr_; }
   4670    private:
   4671     LNumberTagI* instr_;
   4672   };
   4673 
   4674   Register src = ToRegister(instr->value());
   4675   Register dst = ToRegister(instr->result());
   4676 
   4677   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
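          // SmiTag doubles the value; the overflow flag is set if the result does
          // not fit in a smi, in which case the deferred code boxes the value in
          // a heap number.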
   4678   __ SmiTag(dst, src, SetCC);
   4679   __ b(vs, deferred->entry());
   4680   __ bind(deferred->exit());
   4681 }
   4682 
   4683 
   4684 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4685   class DeferredNumberTagU FINAL : public LDeferredCode {
   4686    public:
   4687     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4688         : LDeferredCode(codegen), instr_(instr) { }
   4689     virtual void Generate() OVERRIDE {
   4690       codegen()->DoDeferredNumberTagIU(instr_,
   4691                                        instr_->value(),
   4692                                        instr_->temp1(),
   4693                                        instr_->temp2(),
   4694                                        UNSIGNED_INT32);
   4695     }
   4696     virtual LInstruction* instr() OVERRIDE { return instr_; }
   4697    private:
   4698     LNumberTagU* instr_;
   4699   };
   4700 
   4701   Register input = ToRegister(instr->value());
   4702   Register result = ToRegister(instr->result());
   4703 
   4704   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
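          // Unsigned values above Smi::kMaxValue cannot be represented as a smi;
          // the deferred code boxes them in a heap number instead.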
   4705   __ cmp(input, Operand(Smi::kMaxValue));
   4706   __ b(hi, deferred->entry());
   4707   __ SmiTag(result, input);
   4708   __ bind(deferred->exit());
   4709 }
   4710 
   4711 
   4712 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
   4713                                      LOperand* value,
   4714                                      LOperand* temp1,
   4715                                      LOperand* temp2,
   4716                                      IntegerSignedness signedness) {
   4717   Label done, slow;
   4718   Register src = ToRegister(value);
   4719   Register dst = ToRegister(instr->result());
   4720   Register tmp1 = scratch0();
   4721   Register tmp2 = ToRegister(temp1);
   4722   Register tmp3 = ToRegister(temp2);
   4723   LowDwVfpRegister dbl_scratch = double_scratch0();
   4724 
   4725   if (signedness == SIGNED_INT32) {
   4726     // There was overflow, so bits 30 and 31 of the original integer
   4727     // disagree. Try to allocate a heap number in new space and store
   4728     // the value in there. If that fails, call the runtime system.
   4729     if (dst.is(src)) {
   4730       __ SmiUntag(src, dst);
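              // Untagging the overflowed result leaves bit 31 inverted (overflow
              // means bits 30 and 31 of the original value disagreed); the eor
              // restores the original sign bit.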
   4731       __ eor(src, src, Operand(0x80000000));
   4732     }
   4733     __ vmov(dbl_scratch.low(), src);
   4734     __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
   4735   } else {
   4736     __ vmov(dbl_scratch.low(), src);
   4737     __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
   4738   }
   4739 
   4740   if (FLAG_inline_new) {
   4741     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4742     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
   4743     __ b(&done);
   4744   }
   4745 
   4746   // Slow case: Call the runtime system to do the number allocation.
   4747   __ bind(&slow);
   4748   {
   4749     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4750     // result register is stored, as this register is in the pointer map, but
   4751     // contains an integer value.
   4752     __ mov(dst, Operand::Zero());
   4753 
   4754     // Preserve the value of all registers.
   4755     PushSafepointRegistersScope scope(this);
   4756 
   4757     // NumberTagI and NumberTagD use the context from the frame, rather than
   4758     // the environment's HContext or HInlinedContext value.
   4759     // They only call Runtime::kAllocateHeapNumber.
   4760     // The corresponding HChange instructions are added in a phase that does
   4761     // not have easy access to the local context.
   4762     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4763     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4764     RecordSafepointWithRegisters(
   4765         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4766     __ sub(r0, r0, Operand(kHeapObjectTag));
   4767     __ StoreToSafepointRegisterSlot(r0, dst);
   4768   }
   4769 
   4770   // Done. Put the value in dbl_scratch into the value of the allocated heap
   4771   // number.
   4772   __ bind(&done);
   4773   __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
   4774   __ add(dst, dst, Operand(kHeapObjectTag));
   4775 }
   4776 
   4777 
   4778 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4779   class DeferredNumberTagD FINAL : public LDeferredCode {
   4780    public:
   4781     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4782         : LDeferredCode(codegen), instr_(instr) { }
   4783     virtual void Generate() OVERRIDE {
   4784       codegen()->DoDeferredNumberTagD(instr_);
   4785     }
   4786     virtual LInstruction* instr() OVERRIDE { return instr_; }
   4787    private:
   4788     LNumberTagD* instr_;
   4789   };
   4790 
   4791   DwVfpRegister input_reg = ToDoubleRegister(instr->value());
   4792   Register scratch = scratch0();
   4793   Register reg = ToRegister(instr->result());
   4794   Register temp1 = ToRegister(instr->temp());
   4795   Register temp2 = ToRegister(instr->temp2());
   4796 
   4797   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4798   if (FLAG_inline_new) {
   4799     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
   4800     // We want the untagged address first for performance
   4801     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
   4802                           DONT_TAG_RESULT);
   4803   } else {
   4804     __ jmp(deferred->entry());
   4805   }
   4806   __ bind(deferred->exit());
   4807   __ vstr(input_reg, reg, HeapNumber::kValueOffset);
   4808   // Now that we have finished with the object's real address tag it
   4809   // Now that we have finished with the object's real address, tag it.
   4810 }
   4811 
   4812 
   4813 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4814   // TODO(3095996): Get rid of this. For now, we need to make the
   4815   // result register contain a valid pointer because it is already
   4816   // contained in the register pointer map.
   4817   Register reg = ToRegister(instr->result());
   4818   __ mov(reg, Operand::Zero());
   4819 
   4820   PushSafepointRegistersScope scope(this);
   4821   // NumberTagI and NumberTagD use the context from the frame, rather than
   4822   // the environment's HContext or HInlinedContext value.
   4823   // They only call Runtime::kAllocateHeapNumber.
   4824   // The corresponding HChange instructions are added in a phase that does
   4825   // not have easy access to the local context.
   4826   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4827   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4828   RecordSafepointWithRegisters(
   4829       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4830   __ sub(r0, r0, Operand(kHeapObjectTag));
   4831   __ StoreToSafepointRegisterSlot(r0, reg);
   4832 }
   4833 
   4834 
   4835 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4836   HChange* hchange = instr->hydrogen();
   4837   Register input = ToRegister(instr->value());
   4838   Register output = ToRegister(instr->result());
   4839   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4840       hchange->value()->CheckFlag(HValue::kUint32)) {
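            // A uint32 value fits in a smi only if it is below 2^30, i.e. its two
            // top bits are clear.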
   4841     __ tst(input, Operand(0xc0000000));
   4842     DeoptimizeIf(ne, instr);
   4843   }
   4844   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4845       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4846     __ SmiTag(output, input, SetCC);
   4847     DeoptimizeIf(vs, instr);
   4848   } else {
   4849     __ SmiTag(output, input);
   4850   }
   4851 }
   4852 
   4853 
   4854 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4855   Register input = ToRegister(instr->value());
   4856   Register result = ToRegister(instr->result());
   4857   if (instr->needs_check()) {
   4858     STATIC_ASSERT(kHeapObjectTag == 1);
   4859     // If the input is a HeapObject, SmiUntag will set the carry flag.
   4860     __ SmiUntag(result, input, SetCC);
   4861     DeoptimizeIf(cs, instr);
   4862   } else {
   4863     __ SmiUntag(result, input);
   4864   }
   4865 }
   4866 
   4867 
   4868 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4869                                 DwVfpRegister result_reg,
   4870                                 NumberUntagDMode mode) {
   4871   bool can_convert_undefined_to_nan =
   4872       instr->hydrogen()->can_convert_undefined_to_nan();
   4873   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4874 
   4875   Register scratch = scratch0();
   4876   SwVfpRegister flt_scratch = double_scratch0().low();
   4877   DCHECK(!result_reg.is(double_scratch0()));
   4878   Label convert, load_smi, done;
   4879   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4880     // Smi check.
   4881     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4882     // Heap number map check.
   4883     __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4884     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   4885     __ cmp(scratch, Operand(ip));
   4886     if (can_convert_undefined_to_nan) {
   4887       __ b(ne, &convert);
   4888     } else {
   4889       DeoptimizeIf(ne, instr);
   4890     }
   4891     // Load the heap number value.
   4892     __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
   4893     if (deoptimize_on_minus_zero) {
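              // -0.0 has a zero low word and only the sign bit set in the high
              // word; deoptimize when both conditions hold.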
   4894       __ VmovLow(scratch, result_reg);
   4895       __ cmp(scratch, Operand::Zero());
   4896       __ b(ne, &done);
   4897       __ VmovHigh(scratch, result_reg);
   4898       __ cmp(scratch, Operand(HeapNumber::kSignMask));
   4899       DeoptimizeIf(eq, instr);
   4900     }
   4901     __ jmp(&done);
   4902     if (can_convert_undefined_to_nan) {
   4903       __ bind(&convert);
   4904       // Convert undefined (and hole) to NaN.
   4905       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4906       __ cmp(input_reg, Operand(ip));
   4907       DeoptimizeIf(ne, instr);
   4908       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4909       __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
   4910       __ jmp(&done);
   4911     }
   4912   } else {
   4913     __ SmiUntag(scratch, input_reg);
   4914     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4915   }
   4916   // Smi to double register conversion
   4917   __ bind(&load_smi);
   4918   // scratch: untagged value of input_reg
   4919   __ vmov(flt_scratch, scratch);
   4920   __ vcvt_f64_s32(result_reg, flt_scratch);
   4921   __ bind(&done);
   4922 }
   4923 
   4924 
   4925 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4926   Register input_reg = ToRegister(instr->value());
   4927   Register scratch1 = scratch0();
   4928   Register scratch2 = ToRegister(instr->temp());
   4929   LowDwVfpRegister double_scratch = double_scratch0();
   4930   DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4931 
   4932   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4933   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4934 
   4935   Label done;
   4936 
   4937   // The input was optimistically untagged; revert it.
   4938   // The carry flag is set when we reach this deferred code as we just executed
   4939   // SmiUntag(heap_object, SetCC)
   4940   STATIC_ASSERT(kHeapObjectTag == 1);
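          // adc computes input_reg * 2 + carry; with the heap object tag (1) held
          // in the carry flag this reconstructs the original tagged pointer.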
   4941   __ adc(scratch2, input_reg, Operand(input_reg));
   4942 
   4943   // Heap number map check.
   4944   __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
   4945   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   4946   __ cmp(scratch1, Operand(ip));
   4947 
   4948   if (instr->truncating()) {
   4949     // Performs a truncating conversion of a floating point number as used by
   4950     // the JS bitwise operations.
   4951     Label no_heap_number, check_bools, check_false;
   4952     __ b(ne, &no_heap_number);
   4953     __ TruncateHeapNumberToI(input_reg, scratch2);
   4954     __ b(&done);
   4955 
   4956     // Check for oddballs. Undefined and false are converted to zero and
   4957     // true to one for truncating conversions.
   4958     __ bind(&no_heap_number);
   4959     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4960     __ cmp(scratch2, Operand(ip));
   4961     __ b(ne, &check_bools);
   4962     __ mov(input_reg, Operand::Zero());
   4963     __ b(&done);
   4964 
   4965     __ bind(&check_bools);
   4966     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
   4967     __ cmp(scratch2, Operand(ip));
   4968     __ b(ne, &check_false);
   4969     __ mov(input_reg, Operand(1));
   4970     __ b(&done);
   4971 
   4972     __ bind(&check_false);
   4973     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
   4974     __ cmp(scratch2, Operand(ip));
   4975     DeoptimizeIf(ne, instr, "cannot truncate");
   4976     __ mov(input_reg, Operand::Zero());
   4977   } else {
   4978     DeoptimizeIf(ne, instr, "not a heap number");
   4979 
   4980     __ sub(ip, scratch2, Operand(kHeapObjectTag));
   4981     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
   4982     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
   4983     DeoptimizeIf(ne, instr, "lost precision or NaN");
   4984 
   4985     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4986       __ cmp(input_reg, Operand::Zero());
   4987       __ b(ne, &done);
   4988       __ VmovHigh(scratch1, double_scratch2);
   4989       __ tst(scratch1, Operand(HeapNumber::kSignMask));
   4990       DeoptimizeIf(ne, instr, "minus zero");
   4991     }
   4992   }
   4993   __ bind(&done);
   4994 }
   4995 
   4996 
   4997 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4998   class DeferredTaggedToI FINAL : public LDeferredCode {
   4999    public:
   5000     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   5001         : LDeferredCode(codegen), instr_(instr) { }
   5002     virtual void Generate() OVERRIDE {
   5003       codegen()->DoDeferredTaggedToI(instr_);
   5004     }
   5005     virtual LInstruction* instr() OVERRIDE { return instr_; }
   5006    private:
   5007     LTaggedToI* instr_;
   5008   };
   5009 
   5010   LOperand* input = instr->value();
   5011   DCHECK(input->IsRegister());
   5012   DCHECK(input->Equals(instr->result()));
   5013 
   5014   Register input_reg = ToRegister(input);
   5015 
   5016   if (instr->hydrogen()->value()->representation().IsSmi()) {
   5017     __ SmiUntag(input_reg);
   5018   } else {
   5019     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   5020 
   5021     // Optimistically untag the input.
   5022     // If the input is a HeapObject, SmiUntag will set the carry flag.
   5023     __ SmiUntag(input_reg, SetCC);
   5024     // Branch to deferred code if the input was tagged.
   5025     // The deferred code will take care of restoring the tag.
   5026     __ b(cs, deferred->entry());
   5027     __ bind(deferred->exit());
   5028   }
   5029 }
   5030 
   5031 
   5032 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   5033   LOperand* input = instr->value();
   5034   DCHECK(input->IsRegister());
   5035   LOperand* result = instr->result();
   5036   DCHECK(result->IsDoubleRegister());
   5037 
   5038   Register input_reg = ToRegister(input);
   5039   DwVfpRegister result_reg = ToDoubleRegister(result);
   5040 
   5041   HValue* value = instr->hydrogen()->value();
   5042   NumberUntagDMode mode = value->representation().IsSmi()
   5043       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   5044 
   5045   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   5046 }
   5047 
   5048 
   5049 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   5050   Register result_reg = ToRegister(instr->result());
   5051   Register scratch1 = scratch0();
   5052   DwVfpRegister double_input = ToDoubleRegister(instr->value());
   5053   LowDwVfpRegister double_scratch = double_scratch0();
   5054 
   5055   if (instr->truncating()) {
   5056     __ TruncateDoubleToI(result_reg, double_input);
   5057   } else {
   5058     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
   5059     // Deoptimize if the input wasn't an int32 (inside a double).
   5060     DeoptimizeIf(ne, instr);
   5061     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
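              // A zero result may have come from -0.0; inspect the sign bit of
              // the input double and deoptimize if it is set.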
   5062       Label done;
   5063       __ cmp(result_reg, Operand::Zero());
   5064       __ b(ne, &done);
   5065       __ VmovHigh(scratch1, double_input);
   5066       __ tst(scratch1, Operand(HeapNumber::kSignMask));
   5067       DeoptimizeIf(ne, instr);
   5068       __ bind(&done);
   5069     }
   5070   }
   5071 }
   5072 
   5073 
   5074 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   5075   Register result_reg = ToRegister(instr->result());
   5076   Register scratch1 = scratch0();
   5077   DwVfpRegister double_input = ToDoubleRegister(instr->value());
   5078   LowDwVfpRegister double_scratch = double_scratch0();
   5079 
   5080   if (instr->truncating()) {
   5081     __ TruncateDoubleToI(result_reg, double_input);
   5082   } else {
   5083     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
   5084     // Deoptimize if the input wasn't an int32 (inside a double).
   5085     DeoptimizeIf(ne, instr);
   5086     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
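              // As in DoDoubleToI, a zero result may have come from -0.0; check
              // the sign bit and deoptimize for minus zero.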
   5087       Label done;
   5088       __ cmp(result_reg, Operand::Zero());
   5089       __ b(ne, &done);
   5090       __ VmovHigh(scratch1, double_input);
   5091       __ tst(scratch1, Operand(HeapNumber::kSignMask));
   5092       DeoptimizeIf(ne, instr);
   5093       __ bind(&done);
   5094     }
   5095   }
   5096   __ SmiTag(result_reg, SetCC);
   5097   DeoptimizeIf(vs, instr);
   5098 }
   5099 
   5100 
   5101 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   5102   LOperand* input = instr->value();
   5103   __ SmiTst(ToRegister(input));
   5104   DeoptimizeIf(ne, instr);
   5105 }
   5106 
   5107 
   5108 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   5109   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   5110     LOperand* input = instr->value();
   5111     __ SmiTst(ToRegister(input));
   5112     DeoptimizeIf(eq, instr);
   5113   }
   5114 }
   5115 
   5116 
   5117 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   5118   Register input = ToRegister(instr->value());
   5119   Register scratch = scratch0();
   5120 
   5121   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   5122   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   5123 
   5124   if (instr->hydrogen()->is_interval_check()) {
   5125     InstanceType first;
   5126     InstanceType last;
   5127     instr->hydrogen()->GetCheckInterval(&first, &last);
   5128 
   5129     __ cmp(scratch, Operand(first));
   5130 
   5131     // If there is only one type in the interval, check for equality.
   5132     if (first == last) {
   5133       DeoptimizeIf(ne, instr);
   5134     } else {
   5135       DeoptimizeIf(lo, instr);
   5136       // Omit check for the last type.
   5137       if (last != LAST_TYPE) {
   5138         __ cmp(scratch, Operand(last));
   5139         DeoptimizeIf(hi, instr);
   5140       }
   5141     }
   5142   } else {
   5143     uint8_t mask;
   5144     uint8_t tag;
   5145     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   5146 
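            // For a single-bit mask the tag can only be zero or the mask itself
            // (see the DCHECK below), so testing that bit is sufficient.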
   5147     if (base::bits::IsPowerOfTwo32(mask)) {
   5148       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
   5149       __ tst(scratch, Operand(mask));
   5150       DeoptimizeIf(tag == 0 ? ne : eq, instr);
   5151     } else {
   5152       __ and_(scratch, scratch, Operand(mask));
   5153       __ cmp(scratch, Operand(tag));
   5154       DeoptimizeIf(ne, instr);
   5155     }
   5156   }
   5157 }
   5158 
   5159 
   5160 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   5161   Register reg = ToRegister(instr->value());
   5162   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   5163   AllowDeferredHandleDereference smi_check;
   5164   if (isolate()->heap()->InNewSpace(*object)) {
   5166     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   5167     __ mov(ip, Operand(Handle<Object>(cell)));
   5168     __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
   5169     __ cmp(reg, ip);
   5170   } else {
   5171     __ cmp(reg, Operand(object));
   5172   }
   5173   DeoptimizeIf(ne, instr);
   5174 }
   5175 
   5176 
   5177 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5178   {
   5179     PushSafepointRegistersScope scope(this);
   5180     __ push(object);
   5181     __ mov(cp, Operand::Zero());
   5182     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   5183     RecordSafepointWithRegisters(
   5184         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   5185     __ StoreToSafepointRegisterSlot(r0, scratch0());
   5186   }
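          // A smi result from Runtime::kTryMigrateInstance indicates that the
          // migration failed, so deoptimize in that case.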
   5187   __ tst(scratch0(), Operand(kSmiTagMask));
   5188   DeoptimizeIf(eq, instr);
   5189 }
   5190 
   5191 
   5192 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5193   class DeferredCheckMaps FINAL : public LDeferredCode {
   5194    public:
   5195     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5196         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5197       SetExit(check_maps());
   5198     }
   5199     virtual void Generate() OVERRIDE {
   5200       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5201     }
   5202     Label* check_maps() { return &check_maps_; }
   5203     virtual LInstruction* instr() OVERRIDE { return instr_; }
   5204    private:
   5205     LCheckMaps* instr_;
   5206     Label check_maps_;
   5207     Register object_;
   5208   };
   5209 
   5210   if (instr->hydrogen()->IsStabilityCheck()) {
   5211     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5212     for (int i = 0; i < maps->size(); ++i) {
   5213       AddStabilityDependency(maps->at(i).handle());
   5214     }
   5215     return;
   5216   }
   5217 
   5218   Register map_reg = scratch0();
   5219 
   5220   LOperand* input = instr->value();
   5221   DCHECK(input->IsRegister());
   5222   Register reg = ToRegister(input);
   5223 
   5224   __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   5225 
   5226   DeferredCheckMaps* deferred = NULL;
   5227   if (instr->hydrogen()->HasMigrationTarget()) {
   5228     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5229     __ bind(deferred->check_maps());
   5230   }
   5231 
   5232   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5233   Label success;
   5234   for (int i = 0; i < maps->size() - 1; i++) {
   5235     Handle<Map> map = maps->at(i).handle();
   5236     __ CompareMap(map_reg, map, &success);
   5237     __ b(eq, &success);
   5238   }
   5239 
   5240   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5241   __ CompareMap(map_reg, map, &success);
   5242   if (instr->hydrogen()->HasMigrationTarget()) {
   5243     __ b(ne, deferred->entry());
   5244   } else {
   5245     DeoptimizeIf(ne, instr);
   5246   }
   5247 
   5248   __ bind(&success);
   5249 }
   5250 
   5251 
   5252 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5253   DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
   5254   Register result_reg = ToRegister(instr->result());
   5255   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
   5256 }
   5257 
   5258 
   5259 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5260   Register unclamped_reg = ToRegister(instr->unclamped());
   5261   Register result_reg = ToRegister(instr->result());
   5262   __ ClampUint8(result_reg, unclamped_reg);
   5263 }
   5264 
   5265 
   5266 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5267   Register scratch = scratch0();
   5268   Register input_reg = ToRegister(instr->unclamped());
   5269   Register result_reg = ToRegister(instr->result());
   5270   DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
   5271   Label is_smi, done, heap_number;
   5272 
   5273   // Both smi and heap number cases are handled.
   5274   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
   5275 
   5276   // Check for heap number
   5277   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5278   __ cmp(scratch, Operand(factory()->heap_number_map()));
   5279   __ b(eq, &heap_number);
   5280 
   5281   // Check for undefined. Undefined is converted to zero for clamping
   5282   // conversions.
   5283   __ cmp(input_reg, Operand(factory()->undefined_value()));
   5284   DeoptimizeIf(ne, instr);
   5285   __ mov(result_reg, Operand::Zero());
   5286   __ jmp(&done);
   5287 
   5288   // Heap number
   5289   __ bind(&heap_number);
   5290   __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   5291   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   5292   __ jmp(&done);
   5293 
   5294   // smi
   5295   __ bind(&is_smi);
   5296   __ ClampUint8(result_reg, result_reg);
   5297 
   5298   __ bind(&done);
   5299 }
   5300 
   5301 
   5302 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   5303   DwVfpRegister value_reg = ToDoubleRegister(instr->value());
   5304   Register result_reg = ToRegister(instr->result());
   5305   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
   5306     __ VmovHigh(result_reg, value_reg);
   5307   } else {
   5308     __ VmovLow(result_reg, value_reg);
   5309   }
   5310 }
   5311 
   5312 
   5313 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
   5314   Register hi_reg = ToRegister(instr->hi());
   5315   Register lo_reg = ToRegister(instr->lo());
   5316   DwVfpRegister result_reg = ToDoubleRegister(instr->result());
   5317   __ VmovHigh(result_reg, hi_reg);
   5318   __ VmovLow(result_reg, lo_reg);
   5319 }
   5320 
   5321 
   5322 void LCodeGen::DoAllocate(LAllocate* instr) {
   5323   class DeferredAllocate FINAL : public LDeferredCode {
   5324    public:
   5325     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5326         : LDeferredCode(codegen), instr_(instr) { }
   5327     virtual void Generate() OVERRIDE {
   5328       codegen()->DoDeferredAllocate(instr_);
   5329     }
   5330     virtual LInstruction* instr() OVERRIDE { return instr_; }
   5331    private:
   5332     LAllocate* instr_;
   5333   };
   5334 
   5335   DeferredAllocate* deferred =
   5336       new(zone()) DeferredAllocate(this, instr);
   5337 
   5338   Register result = ToRegister(instr->result());
   5339   Register scratch = ToRegister(instr->temp1());
   5340   Register scratch2 = ToRegister(instr->temp2());
   5341 
   5342   // Allocate memory for the object.
   5343   AllocationFlags flags = TAG_OBJECT;
   5344   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5345     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5346   }
   5347   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5348     DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5349     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5350     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   5351   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5352     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5353     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   5354   }
   5355 
   5356   if (instr->size()->IsConstantOperand()) {
   5357     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5358     if (size <= Page::kMaxRegularHeapObjectSize) {
   5359       __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5360     } else {
   5361       __ jmp(deferred->entry());
   5362     }
   5363   } else {
   5364     Register size = ToRegister(instr->size());
   5365     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5366   }
   5367 
   5368   __ bind(deferred->exit());
   5369 
   5370   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5371     STATIC_ASSERT(kHeapObjectTag == 1);
   5372     if (instr->size()->IsConstantOperand()) {
   5373       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5374       __ mov(scratch, Operand(size - kHeapObjectTag));
   5375     } else {
   5376       __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
   5377     }
   5378     __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
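            // Fill the allocated object with the one-pointer filler map, walking
            // backwards one word at a time from the end of the allocation.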
   5379     Label loop;
   5380     __ bind(&loop);
   5381     __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
   5382     __ str(scratch2, MemOperand(result, scratch));
   5383     __ b(ge, &loop);
   5384   }
   5385 }
   5386 
   5387 
   5388 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5389   Register result = ToRegister(instr->result());
   5390 
   5391   // TODO(3095996): Get rid of this. For now, we need to make the
   5392   // result register contain a valid pointer because it is already
   5393   // contained in the register pointer map.
   5394   __ mov(result, Operand(Smi::FromInt(0)));
   5395 
   5396   PushSafepointRegistersScope scope(this);
   5397   if (instr->size()->IsRegister()) {
   5398     Register size = ToRegister(instr->size());
   5399     DCHECK(!size.is(result));
   5400     __ SmiTag(size);
   5401     __ push(size);
   5402   } else {
   5403     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5404     if (size >= 0 && size <= Smi::kMaxValue) {
   5405       __ Push(Smi::FromInt(size));
   5406     } else {
   5407       // We should never get here at runtime; abort.
   5408       __ stop("invalid allocation size");
   5409       return;
   5410     }
   5411   }
   5412 
   5413   int flags = AllocateDoubleAlignFlag::encode(
   5414       instr->hydrogen()->MustAllocateDoubleAligned());
   5415   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5416     DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5417     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5418     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   5419   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5420     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5421     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   5422   } else {
   5423     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5424   }
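  // The double-alignment bit and the target space are packed into a single
  // Smi-encoded flags word that the runtime call below decodes.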
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-r5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create the regexp literal using the runtime function.
  // The result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

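  // Try to allocate the regexp clone inline in new space; fall back to the
  // runtime allocator if that fails.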
  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast-case closure allocation code that allocates in new
  // space for nested functions that don't need literal cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->kind());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


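// Emits the comparison for a typeof test against type_name and returns the
// condition the caller should branch on when the test succeeds. A result of
// kNoCondition means the answer is statically false and an unconditional
// jump to false_label has already been emitted.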
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ b(eq, true_label);
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


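// Walks from the current frame to the calling JavaScript frame (skipping an
// arguments adaptor frame if one is present) and compares its marker against
// StackFrame::CONSTRUCT, so that the eq condition holds iff the function was
// invoked as a constructor.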
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for the duration of the padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
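      // Emit the padding as a run of nops; the constant pool is blocked above
      // so that the gap remains a contiguous sequence of instructions.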
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
    PredictableCodeSizeScope predictable(masm(),
        CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
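  // The value to enumerate over is expected in r0. Deoptimize if it is
  // undefined, null, a Smi, or a JSProxy; those cases are left to the
  // unoptimized code.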
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr);

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr);

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr);

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

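  // All enumerable properties are covered by the enum cache; the prepared
  // result is then simply the object's map.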
  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
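  // An EnumLength of zero means the map has no cached enumerable properties;
  // the canonical empty fixed array then stands in for the cache.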
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
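  // Clear cp: this runtime call is made without a JavaScript context.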
  __ mov(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

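  // The index is a Smi. Its lowest payload bit flags a mutable double field,
  // which is loaded through the runtime in deferred code; the remaining bits
  // hold the field index, with negative values denoting out-of-object
  // properties.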
  __ tst(index, Operand(Smi::FromInt(1)));
  __ b(ne, deferred->entry());
  __ mov(index, Operand(index, ASR, 1));

  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal