// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/code-stubs.h"
#include "src/stub-cache.h"
#include "src/hydrogen-osr.h"

namespace v8 {
namespace internal {


class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ ldr(r2, MemOperand(sp, receiver_offset));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);

      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));

      __ str(r2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
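      // The extra 2 * kPointerSize offset skips over the two words pushed
      // above (r0 and r1), so together with the pre-decrement of r0 the loop
      // zaps exactly the reserved slots.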
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ mov(cp, r0);
    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp,
              target.offset(),
              r0,
              r3,
              GetLinkRegisterState(),
              kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
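    // Pad the code so that a lazy deoptimization call patched in after this
    // call site cannot overlap the previous patch site.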
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ PushFixedFrame();
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ PopFixedFrame();
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data word after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ b(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ PushFixedFrame();
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
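        // Manual call sequence: reading pc on ARM yields the address of the
        // current instruction plus 8, so lr ends up holding the address of
        // the instruction right after the 'mov pc', i.e. the return address.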
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, ip);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
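  // On 32-bit ARM a Smi is the integer shifted left by one with a zero tag
  // bit, so the tagged value can be reinterpreted directly as an int32.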
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
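  // Without an eager frame, arguments live directly above sp: index -1 maps
  // to offset 0, index -2 to kPointerSize, and so on.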
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve the parameter relative to the stack pointer, since there is
    // no eager stack frame.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve the parameter relative to the stack pointer, since there is
    // no eager stack frame.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

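  // Recurse into the outer environments first so that frames are written
  // from the outermost frame inwards.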
  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
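    // The marker stands for an object that was dematerialized by escape
    // analysis. Re-emit it as a captured or arguments object; its field
    // values are stored past the end of the regular environment values.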
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
  int size = masm()->CallSize(code, mode);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
  }
  return size;
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  ASSERT(instr != NULL);
  // Block constant pool emission to ensure that the nop indicating no inlined
  // smi code ends up in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
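    // Stress mode: decrement a global counter on every potential deopt and
    // force this deopt (resetting the counter) whenever it reaches zero.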

    // Store the condition on the stack if necessary.
    if (condition != al) {
      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
      __ mov(scratch, Operand(1), LeaveCC, condition);
      __ push(scratch);
    }

    __ push(r1);
    __ mov(scratch, Operand(count));
    __ ldr(r1, MemOperand(scratch));
    __ sub(r1, r1, Operand(1), SetCC);
    __ movw(r1, FLAG_deopt_every_n_times, eq);
    __ str(r1, MemOperand(scratch));
    __ pop(r1);

    if (condition != al) {
      // Clean up the stack before the deoptimizer call.
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);

    // 'Restore' the condition in a slightly hacky way. (It would be better
    // to use 'msr' and 'mrs' instructions here, but they are not supported by
    // our ARM simulator).
    if (condition != al) {
      condition = ne;
      __ cmp(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &deopt_jump_table_.last().label);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
    // Register pp always contains a pointer to the constant pool.
    safepoint.DefinePointerRegister(pp, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
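  // For divisor = +/-2^k the mask is 2^k - 1. The negative branch is written
  // as -(divisor + 1) so that it stays correct for divisor == kMinInt, where
  // negating the divisor itself would overflow.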
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
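  // Compute the remainder as dividend - trunc(dividend / |divisor|) *
  // |divisor|. Only the low 32 bits of the product are needed, and SetCC
  // records whether the remainder is zero for the minus-zero check below.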
  __ mov(ip, Operand(Abs(divisor)));
  __ smull(result, ip, result, ip);
  __ sub(result, dividend, result, SetCC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ b(ne, &remainder_not_zero);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr->environment());
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0; sdiv might signal an exception. We have to deopt in
    // this case because we can't return a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1; sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return that.
    if (hmod->CheckFlag(HValue::kCanOverflow)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ Mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0; we have to deopt in this case because we can't return
    // a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments into VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne): The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32-bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr->environment());
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ rsb(result, dividend, Operand(0));
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
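  // An arithmetic right shift rounds toward -infinity; adding 2^shift - 1
  // (extracted from the dividend's sign bits) first makes the division round
  // toward zero for negative dividends, i.e. truncating division.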
  if (shift == 0) {
    __ mov(result, dividend);
  } else if (shift == 1) {
    __ add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr->environment());
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    1378       // Do the test only if it hasn't been done above.
   1379       __ cmp(divisor, Operand::Zero());
   1380     }
   1381     __ b(pl, &positive);
   1382     __ cmp(dividend, Operand::Zero());
   1383     DeoptimizeIf(eq, instr->environment());
   1384     __ bind(&positive);
   1385   }
   1386 
   1387   // Check for (kMinInt / -1).
   1388   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
   1389       (!CpuFeatures::IsSupported(SUDIV) ||
   1390        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
   1391     // We don't need to check for overflow when truncating with sdiv
   1392     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
   1393     __ cmp(dividend, Operand(kMinInt));
   1394     __ cmp(divisor, Operand(-1), eq);
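             // The second cmp is predicated on eq, so the final eq condition
             // holds only when dividend == kMinInt && divisor == -1.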
   1395     DeoptimizeIf(eq, instr->environment());
   1396   }
   1397 
   1398   if (CpuFeatures::IsSupported(SUDIV)) {
   1399     CpuFeatureScope scope(masm(), SUDIV);
   1400     __ sdiv(result, dividend, divisor);
   1401   } else {
   1402     DoubleRegister vleft = ToDoubleRegister(instr->temp());
   1403     DoubleRegister vright = double_scratch0();
   1404     __ vmov(double_scratch0().low(), dividend);
   1405     __ vcvt_f64_s32(vleft, double_scratch0().low());
   1406     __ vmov(double_scratch0().low(), divisor);
   1407     __ vcvt_f64_s32(vright, double_scratch0().low());
   1408     __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
   1409     __ vcvt_s32_f64(double_scratch0().low(), vleft);
   1410     __ vmov(result, double_scratch0().low());
   1411   }
   1412 
   1413   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   1414     // Compute remainder and deopt if it's not zero.
   1415     Register remainder = scratch0();
   1416     __ Mls(remainder, result, divisor, dividend);
   1417     __ cmp(remainder, Operand::Zero());
   1418     DeoptimizeIf(ne, instr->environment());
   1419   }
   1420 }
   1421 
   1422 
   1423 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
   1424   DwVfpRegister addend = ToDoubleRegister(instr->addend());
   1425   DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
   1426   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
   1427 
   1428   // This is computed in-place.
   1429   ASSERT(addend.is(ToDoubleRegister(instr->result())));
   1430 
   1431   __ vmla(addend, multiplier, multiplicand);
   1432 }
   1433 
   1434 
   1435 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
   1436   DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
   1437   DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
   1438   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
   1439 
   1440   // This is computed in-place.
   1441   ASSERT(minuend.is(ToDoubleRegister(instr->result())));
   1442 
   1443   __ vmls(minuend, multiplier, multiplicand);
   1444 }
   1445 
   1446 
   1447 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   1448   Register dividend = ToRegister(instr->dividend());
   1449   Register result = ToRegister(instr->result());
   1450   int32_t divisor = instr->divisor();
   1451 
   1452   // If the divisor is 1, return the dividend.
   1453   if (divisor == 1) {
   1454     __ Move(result, dividend);
   1455     return;
   1456   }
   1457 
   1458   // If the divisor is positive, things are easy: There can be no deopts and we
   1459   // can simply do an arithmetic right shift.
   1460   int32_t shift = WhichPowerOf2Abs(divisor);
   1461   if (divisor > 1) {
   1462     __ mov(result, Operand(dividend, ASR, shift));
   1463     return;
   1464   }
   1465 
   1466   // If the divisor is negative, we have to negate and handle edge cases.
   1467   __ rsb(result, dividend, Operand::Zero(), SetCC);
   1468   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1469     DeoptimizeIf(eq, instr->environment());
   1470   }
   1471 
   1472   // Dividing by -1 is basically negation, unless we overflow.
   1473   if (divisor == -1) {
   1474     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1475       DeoptimizeIf(vs, instr->environment());
   1476     }
   1477     return;
   1478   }
   1479 
   1480   // If the negation could not overflow, simply shifting is OK.
   1481   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1482     __ mov(result, Operand(result, ASR, shift));
   1483     return;
   1484   }
   1485 
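           // If the negation overflowed (vs), the dividend was kMinInt and the
           // quotient is the compile-time constant kMinInt / divisor; otherwise
           // shift the negated dividend.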
   1486   __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
   1487   __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
   1488 }
   1489 
   1490 
   1491 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   1492   Register dividend = ToRegister(instr->dividend());
   1493   int32_t divisor = instr->divisor();
   1494   Register result = ToRegister(instr->result());
   1495   ASSERT(!dividend.is(result));
   1496 
   1497   if (divisor == 0) {
   1498     DeoptimizeIf(al, instr->environment());
   1499     return;
   1500   }
   1501 
   1502   // Check for (0 / -x) that will produce negative zero.
   1503   HMathFloorOfDiv* hdiv = instr->hydrogen();
   1504   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1505     __ cmp(dividend, Operand::Zero());
   1506     DeoptimizeIf(eq, instr->environment());
   1507   }
   1508 
   1509   // Easy case: We need no dynamic check for the dividend and the flooring
   1510   // division is the same as the truncating division.
   1511   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
   1512       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
   1513     __ TruncatingDiv(result, dividend, Abs(divisor));
   1514     if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1515     return;
   1516   }
   1517 
   1518   // In the general case we may need to adjust before and after the truncating
   1519   // division to get a flooring division.
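           // E.g. -7 / 2: floor is -4 but truncation gives -3; biasing the
           // dividend to -6 keeps the truncating quotient at -3, and the final
           // subtraction of 1 yields -4.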
   1520   Register temp = ToRegister(instr->temp());
   1521   ASSERT(!temp.is(dividend) && !temp.is(result));
   1522   Label needs_adjustment, done;
   1523   __ cmp(dividend, Operand::Zero());
   1524   __ b(divisor > 0 ? lt : gt, &needs_adjustment);
   1525   __ TruncatingDiv(result, dividend, Abs(divisor));
   1526   if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1527   __ jmp(&done);
   1528   __ bind(&needs_adjustment);
   1529   __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
   1530   __ TruncatingDiv(result, temp, Abs(divisor));
   1531   if (divisor < 0) __ rsb(result, result, Operand::Zero());
   1532   __ sub(result, result, Operand(1));
   1533   __ bind(&done);
   1534 }
   1535 
   1536 
   1537 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
   1538 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   1539   HBinaryOperation* hdiv = instr->hydrogen();
   1540   Register left = ToRegister(instr->dividend());
   1541   Register right = ToRegister(instr->divisor());
   1542   Register result = ToRegister(instr->result());
   1543 
   1544   // Check for x / 0.
   1545   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1546     __ cmp(right, Operand::Zero());
   1547     DeoptimizeIf(eq, instr->environment());
   1548   }
   1549 
   1550   // Check for (0 / -x) that will produce negative zero.
   1551   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1552     Label positive;
   1553     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    1554       // Do the test only if it hasn't been done above.
   1555       __ cmp(right, Operand::Zero());
   1556     }
   1557     __ b(pl, &positive);
   1558     __ cmp(left, Operand::Zero());
   1559     DeoptimizeIf(eq, instr->environment());
   1560     __ bind(&positive);
   1561   }
   1562 
   1563   // Check for (kMinInt / -1).
   1564   if (hdiv->CheckFlag(HValue::kCanOverflow) &&
   1565       (!CpuFeatures::IsSupported(SUDIV) ||
   1566        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
   1567     // We don't need to check for overflow when truncating with sdiv
   1568     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
   1569     __ cmp(left, Operand(kMinInt));
   1570     __ cmp(right, Operand(-1), eq);
   1571     DeoptimizeIf(eq, instr->environment());
   1572   }
   1573 
   1574   if (CpuFeatures::IsSupported(SUDIV)) {
   1575     CpuFeatureScope scope(masm(), SUDIV);
   1576     __ sdiv(result, left, right);
   1577   } else {
   1578     DoubleRegister vleft = ToDoubleRegister(instr->temp());
   1579     DoubleRegister vright = double_scratch0();
   1580     __ vmov(double_scratch0().low(), left);
   1581     __ vcvt_f64_s32(vleft, double_scratch0().low());
   1582     __ vmov(double_scratch0().low(), right);
   1583     __ vcvt_f64_s32(vright, double_scratch0().low());
   1584     __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
   1585     __ vcvt_s32_f64(double_scratch0().low(), vleft);
   1586     __ vmov(result, double_scratch0().low());
   1587   }
   1588 
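           // Turn the truncating quotient into a flooring one: when the
           // remainder is nonzero and its sign differs from the divisor's,
           // (remainder ^ right) is negative, so adding its ASR-31 value of -1
           // decrements the result.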
   1589   Label done;
   1590   Register remainder = scratch0();
   1591   __ Mls(remainder, result, right, left);
   1592   __ cmp(remainder, Operand::Zero());
   1593   __ b(eq, &done);
   1594   __ eor(remainder, remainder, Operand(right));
   1595   __ add(result, result, Operand(remainder, ASR, 31));
   1596   __ bind(&done);
   1597 }
   1598 
   1599 
   1600 void LCodeGen::DoMulI(LMulI* instr) {
   1601   Register result = ToRegister(instr->result());
   1602   // Note that result may alias left.
   1603   Register left = ToRegister(instr->left());
   1604   LOperand* right_op = instr->right();
   1605 
   1606   bool bailout_on_minus_zero =
   1607     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   1608   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1609 
   1610   if (right_op->IsConstantOperand()) {
   1611     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
   1612 
   1613     if (bailout_on_minus_zero && (constant < 0)) {
    1614       // The case of a zero constant is handled separately below.
    1615       // If the constant is negative and left is zero, the result should be -0.
   1616       __ cmp(left, Operand::Zero());
   1617       DeoptimizeIf(eq, instr->environment());
   1618     }
   1619 
   1620     switch (constant) {
   1621       case -1:
   1622         if (overflow) {
   1623           __ rsb(result, left, Operand::Zero(), SetCC);
   1624           DeoptimizeIf(vs, instr->environment());
   1625         } else {
   1626           __ rsb(result, left, Operand::Zero());
   1627         }
   1628         break;
   1629       case 0:
   1630         if (bailout_on_minus_zero) {
    1631           // If left is strictly negative and the constant is zero, the
    1632           // result is -0. Deoptimize if required, otherwise return 0.
   1633           __ cmp(left, Operand::Zero());
   1634           DeoptimizeIf(mi, instr->environment());
   1635         }
   1636         __ mov(result, Operand::Zero());
   1637         break;
   1638       case 1:
   1639         __ Move(result, left);
   1640         break;
   1641       default:
   1642         // Multiplying by powers of two and powers of two plus or minus
   1643         // one can be done faster with shifted operands.
   1644         // For other constants we emit standard code.
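                 // Branchless absolute value: mask is 0 for non-negative
                 // constants and -1 for negative ones, so
                 // (constant + mask) ^ mask == |constant|.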
   1645         int32_t mask = constant >> 31;
   1646         uint32_t constant_abs = (constant + mask) ^ mask;
   1647 
   1648         if (IsPowerOf2(constant_abs)) {
   1649           int32_t shift = WhichPowerOf2(constant_abs);
   1650           __ mov(result, Operand(left, LSL, shift));
    1651           // Correct the sign of the result if the constant is negative.
   1652           if (constant < 0)  __ rsb(result, result, Operand::Zero());
   1653         } else if (IsPowerOf2(constant_abs - 1)) {
   1654           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1655           __ add(result, left, Operand(left, LSL, shift));
    1656           // Correct the sign of the result if the constant is negative.
   1657           if (constant < 0)  __ rsb(result, result, Operand::Zero());
   1658         } else if (IsPowerOf2(constant_abs + 1)) {
   1659           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1660           __ rsb(result, left, Operand(left, LSL, shift));
    1661           // Correct the sign of the result if the constant is negative.
   1662           if (constant < 0)  __ rsb(result, result, Operand::Zero());
   1663         } else {
   1664           // Generate standard code.
   1665           __ mov(ip, Operand(constant));
   1666           __ mul(result, left, ip);
   1667         }
   1668     }
   1669 
   1670   } else {
   1671     ASSERT(right_op->IsRegister());
   1672     Register right = ToRegister(right_op);
   1673 
   1674     if (overflow) {
   1675       Register scratch = scratch0();
   1676       // scratch:result = left * right.
   1677       if (instr->hydrogen()->representation().IsSmi()) {
   1678         __ SmiUntag(result, left);
   1679         __ smull(result, scratch, result, right);
   1680       } else {
   1681         __ smull(result, scratch, left, right);
   1682       }
   1683       __ cmp(scratch, Operand(result, ASR, 31));
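               // smull left a 64-bit product in scratch:result; it fits in
               // 32 bits iff the high word equals the sign extension of the
               // low word.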
   1684       DeoptimizeIf(ne, instr->environment());
   1685     } else {
   1686       if (instr->hydrogen()->representation().IsSmi()) {
   1687         __ SmiUntag(result, left);
   1688         __ mul(result, result, right);
   1689       } else {
   1690         __ mul(result, left, right);
   1691       }
   1692     }
   1693 
   1694     if (bailout_on_minus_zero) {
   1695       Label done;
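               // A zero product needs the -0 bailout only when the operands'
               // signs differ; teq sets N from left ^ right, so pl (N clear)
               // skips the check.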
   1696       __ teq(left, Operand(right));
   1697       __ b(pl, &done);
   1698       // Bail out if the result is minus zero.
   1699       __ cmp(result, Operand::Zero());
   1700       DeoptimizeIf(eq, instr->environment());
   1701       __ bind(&done);
   1702     }
   1703   }
   1704 }
   1705 
   1706 
   1707 void LCodeGen::DoBitI(LBitI* instr) {
   1708   LOperand* left_op = instr->left();
   1709   LOperand* right_op = instr->right();
   1710   ASSERT(left_op->IsRegister());
   1711   Register left = ToRegister(left_op);
   1712   Register result = ToRegister(instr->result());
   1713   Operand right(no_reg);
   1714 
   1715   if (right_op->IsStackSlot()) {
   1716     right = Operand(EmitLoadRegister(right_op, ip));
   1717   } else {
   1718     ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
   1719     right = ToOperand(right_op);
   1720   }
   1721 
   1722   switch (instr->op()) {
   1723     case Token::BIT_AND:
   1724       __ and_(result, left, right);
   1725       break;
   1726     case Token::BIT_OR:
   1727       __ orr(result, left, right);
   1728       break;
   1729     case Token::BIT_XOR:
   1730       if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
   1731         __ mvn(result, Operand(left));
   1732       } else {
   1733         __ eor(result, left, right);
   1734       }
   1735       break;
   1736     default:
   1737       UNREACHABLE();
   1738       break;
   1739   }
   1740 }
   1741 
   1742 
   1743 void LCodeGen::DoShiftI(LShiftI* instr) {
   1744   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
   1745   // result may alias either of them.
   1746   LOperand* right_op = instr->right();
   1747   Register left = ToRegister(instr->left());
   1748   Register result = ToRegister(instr->result());
   1749   Register scratch = scratch0();
   1750   if (right_op->IsRegister()) {
   1751     // Mask the right_op operand.
   1752     __ and_(scratch, ToRegister(right_op), Operand(0x1F));
   1753     switch (instr->op()) {
   1754       case Token::ROR:
   1755         __ mov(result, Operand(left, ROR, scratch));
   1756         break;
   1757       case Token::SAR:
   1758         __ mov(result, Operand(left, ASR, scratch));
   1759         break;
   1760       case Token::SHR:
   1761         if (instr->can_deopt()) {
   1762           __ mov(result, Operand(left, LSR, scratch), SetCC);
   1763           DeoptimizeIf(mi, instr->environment());
   1764         } else {
   1765           __ mov(result, Operand(left, LSR, scratch));
   1766         }
   1767         break;
   1768       case Token::SHL:
   1769         __ mov(result, Operand(left, LSL, scratch));
   1770         break;
   1771       default:
   1772         UNREACHABLE();
   1773         break;
   1774     }
   1775   } else {
   1776     // Mask the right_op operand.
   1777     int value = ToInteger32(LConstantOperand::cast(right_op));
   1778     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
   1779     switch (instr->op()) {
   1780       case Token::ROR:
    1781         if (shift_count != 0) {
   1782           __ mov(result, Operand(left, ROR, shift_count));
   1783         } else {
   1784           __ Move(result, left);
   1785         }
   1786         break;
   1787       case Token::SAR:
   1788         if (shift_count != 0) {
   1789           __ mov(result, Operand(left, ASR, shift_count));
   1790         } else {
   1791           __ Move(result, left);
   1792         }
   1793         break;
   1794       case Token::SHR:
   1795         if (shift_count != 0) {
   1796           __ mov(result, Operand(left, LSR, shift_count));
   1797         } else {
   1798           if (instr->can_deopt()) {
   1799             __ tst(left, Operand(0x80000000));
   1800             DeoptimizeIf(ne, instr->environment());
   1801           }
   1802           __ Move(result, left);
   1803         }
   1804         break;
   1805       case Token::SHL:
   1806         if (shift_count != 0) {
   1807           if (instr->hydrogen_value()->representation().IsSmi() &&
   1808               instr->can_deopt()) {
   1809             if (shift_count != 1) {
   1810               __ mov(result, Operand(left, LSL, shift_count - 1));
   1811               __ SmiTag(result, result, SetCC);
   1812             } else {
   1813               __ SmiTag(result, left, SetCC);
   1814             }
   1815             DeoptimizeIf(vs, instr->environment());
   1816           } else {
   1817             __ mov(result, Operand(left, LSL, shift_count));
   1818           }
   1819         } else {
   1820           __ Move(result, left);
   1821         }
   1822         break;
   1823       default:
   1824         UNREACHABLE();
   1825         break;
   1826     }
   1827   }
   1828 }
   1829 
   1830 
   1831 void LCodeGen::DoSubI(LSubI* instr) {
   1832   LOperand* left = instr->left();
   1833   LOperand* right = instr->right();
   1834   LOperand* result = instr->result();
   1835   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1836   SBit set_cond = can_overflow ? SetCC : LeaveCC;
   1837 
   1838   if (right->IsStackSlot()) {
   1839     Register right_reg = EmitLoadRegister(right, ip);
   1840     __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   1841   } else {
   1842     ASSERT(right->IsRegister() || right->IsConstantOperand());
   1843     __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   1844   }
   1845 
   1846   if (can_overflow) {
   1847     DeoptimizeIf(vs, instr->environment());
   1848   }
   1849 }
   1850 
   1851 
   1852 void LCodeGen::DoRSubI(LRSubI* instr) {
   1853   LOperand* left = instr->left();
   1854   LOperand* right = instr->right();
   1855   LOperand* result = instr->result();
   1856   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1857   SBit set_cond = can_overflow ? SetCC : LeaveCC;
   1858 
   1859   if (right->IsStackSlot()) {
   1860     Register right_reg = EmitLoadRegister(right, ip);
   1861     __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   1862   } else {
   1863     ASSERT(right->IsRegister() || right->IsConstantOperand());
   1864     __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   1865   }
   1866 
   1867   if (can_overflow) {
   1868     DeoptimizeIf(vs, instr->environment());
   1869   }
   1870 }
   1871 
   1872 
   1873 void LCodeGen::DoConstantI(LConstantI* instr) {
   1874   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1875 }
   1876 
   1877 
   1878 void LCodeGen::DoConstantS(LConstantS* instr) {
   1879   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1880 }
   1881 
   1882 
   1883 void LCodeGen::DoConstantD(LConstantD* instr) {
   1884   ASSERT(instr->result()->IsDoubleRegister());
   1885   DwVfpRegister result = ToDoubleRegister(instr->result());
   1886   double v = instr->value();
   1887   __ Vmov(result, v, scratch0());
   1888 }
   1889 
   1890 
   1891 void LCodeGen::DoConstantE(LConstantE* instr) {
   1892   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1893 }
   1894 
   1895 
   1896 void LCodeGen::DoConstantT(LConstantT* instr) {
   1897   Handle<Object> object = instr->value(isolate());
   1898   AllowDeferredHandleDereference smi_check;
   1899   __ Move(ToRegister(instr->result()), object);
   1900 }
   1901 
   1902 
   1903 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
   1904   Register result = ToRegister(instr->result());
   1905   Register map = ToRegister(instr->value());
   1906   __ EnumLength(result, map);
   1907 }
   1908 
   1909 
   1910 void LCodeGen::DoDateField(LDateField* instr) {
   1911   Register object = ToRegister(instr->date());
   1912   Register result = ToRegister(instr->result());
   1913   Register scratch = ToRegister(instr->temp());
   1914   Smi* index = instr->index();
   1915   Label runtime, done;
   1916   ASSERT(object.is(result));
   1917   ASSERT(object.is(r0));
   1918   ASSERT(!scratch.is(scratch0()));
   1919   ASSERT(!scratch.is(object));
   1920 
   1921   __ SmiTst(object);
   1922   DeoptimizeIf(eq, instr->environment());
   1923   __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
   1924   DeoptimizeIf(ne, instr->environment());
   1925 
   1926   if (index->value() == 0) {
   1927     __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
   1928   } else {
   1929     if (index->value() < JSDate::kFirstUncachedField) {
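               // Cached date fields are valid only while the object's cache
               // stamp matches the isolate's date cache stamp; otherwise fall
               // through to the runtime call below.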
   1930       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
   1931       __ mov(scratch, Operand(stamp));
   1932       __ ldr(scratch, MemOperand(scratch));
   1933       __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
   1934       __ cmp(scratch, scratch0());
   1935       __ b(ne, &runtime);
   1936       __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
   1937                                              kPointerSize * index->value()));
   1938       __ jmp(&done);
   1939     }
   1940     __ bind(&runtime);
   1941     __ PrepareCallCFunction(2, scratch);
   1942     __ mov(r1, Operand(index));
   1943     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
   1944     __ bind(&done);
   1945   }
   1946 }
   1947 
   1948 
   1949 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   1950                                            LOperand* index,
   1951                                            String::Encoding encoding) {
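           // Builds a MemOperand addressing character |index| of a sequential
           // string, scaling the index by the character width for two-byte
           // encodings.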
   1952   if (index->IsConstantOperand()) {
   1953     int offset = ToInteger32(LConstantOperand::cast(index));
   1954     if (encoding == String::TWO_BYTE_ENCODING) {
   1955       offset *= kUC16Size;
   1956     }
   1957     STATIC_ASSERT(kCharSize == 1);
   1958     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   1959   }
   1960   Register scratch = scratch0();
   1961   ASSERT(!scratch.is(string));
   1962   ASSERT(!scratch.is(ToRegister(index)));
   1963   if (encoding == String::ONE_BYTE_ENCODING) {
   1964     __ add(scratch, string, Operand(ToRegister(index)));
   1965   } else {
   1966     STATIC_ASSERT(kUC16Size == 2);
   1967     __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
   1968   }
   1969   return FieldMemOperand(scratch, SeqString::kHeaderSize);
   1970 }
   1971 
   1972 
   1973 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1974   String::Encoding encoding = instr->hydrogen()->encoding();
   1975   Register string = ToRegister(instr->string());
   1976   Register result = ToRegister(instr->result());
   1977 
   1978   if (FLAG_debug_code) {
   1979     Register scratch = scratch0();
   1980     __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   1981     __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1982 
   1983     __ and_(scratch, scratch,
   1984             Operand(kStringRepresentationMask | kStringEncodingMask));
   1985     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1986     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1987     __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
   1988                             ? one_byte_seq_type : two_byte_seq_type));
   1989     __ Check(eq, kUnexpectedStringType);
   1990   }
   1991 
   1992   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1993   if (encoding == String::ONE_BYTE_ENCODING) {
   1994     __ ldrb(result, operand);
   1995   } else {
   1996     __ ldrh(result, operand);
   1997   }
   1998 }
   1999 
   2000 
   2001 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   2002   String::Encoding encoding = instr->hydrogen()->encoding();
   2003   Register string = ToRegister(instr->string());
   2004   Register value = ToRegister(instr->value());
   2005 
   2006   if (FLAG_debug_code) {
   2007     Register index = ToRegister(instr->index());
   2008     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   2009     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   2010     int encoding_mask =
   2011         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   2012         ? one_byte_seq_type : two_byte_seq_type;
   2013     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
   2014   }
   2015 
   2016   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   2017   if (encoding == String::ONE_BYTE_ENCODING) {
   2018     __ strb(value, operand);
   2019   } else {
   2020     __ strh(value, operand);
   2021   }
   2022 }
   2023 
   2024 
   2025 void LCodeGen::DoAddI(LAddI* instr) {
   2026   LOperand* left = instr->left();
   2027   LOperand* right = instr->right();
   2028   LOperand* result = instr->result();
   2029   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   2030   SBit set_cond = can_overflow ? SetCC : LeaveCC;
   2031 
   2032   if (right->IsStackSlot()) {
   2033     Register right_reg = EmitLoadRegister(right, ip);
   2034     __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   2035   } else {
   2036     ASSERT(right->IsRegister() || right->IsConstantOperand());
   2037     __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   2038   }
   2039 
   2040   if (can_overflow) {
   2041     DeoptimizeIf(vs, instr->environment());
   2042   }
   2043 }
   2044 
   2045 
   2046 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   2047   LOperand* left = instr->left();
   2048   LOperand* right = instr->right();
   2049   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   2050   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   2051     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
   2052     Register left_reg = ToRegister(left);
   2053     Operand right_op = (right->IsRegister() || right->IsConstantOperand())
   2054         ? ToOperand(right)
   2055         : Operand(EmitLoadRegister(right, ip));
   2056     Register result_reg = ToRegister(instr->result());
   2057     __ cmp(left_reg, right_op);
   2058     __ Move(result_reg, left_reg, condition);
   2059     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
   2060   } else {
   2061     ASSERT(instr->hydrogen()->representation().IsDouble());
   2062     DwVfpRegister left_reg = ToDoubleRegister(left);
   2063     DwVfpRegister right_reg = ToDoubleRegister(right);
   2064     DwVfpRegister result_reg = ToDoubleRegister(instr->result());
   2065     Label result_is_nan, return_left, return_right, check_zero, done;
   2066     __ VFPCompareAndSetFlags(left_reg, right_reg);
   2067     if (operation == HMathMinMax::kMathMin) {
   2068       __ b(mi, &return_left);
   2069       __ b(gt, &return_right);
   2070     } else {
   2071       __ b(mi, &return_right);
   2072       __ b(gt, &return_left);
   2073     }
   2074     __ b(vs, &result_is_nan);
   2075     // Left equals right => check for -0.
   2076     __ VFPCompareAndSetFlags(left_reg, 0.0);
   2077     if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
   2078       __ b(ne, &done);  // left == right != 0.
   2079     } else {
   2080       __ b(ne, &return_left);  // left == right != 0.
   2081     }
   2082     // At this point, both left and right are either 0 or -0.
   2083     if (operation == HMathMinMax::kMathMin) {
   2084       // We could use a single 'vorr' instruction here if we had NEON support.
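               // Instead compute -((-left) - right): with +/-0 operands this
               // yields -0 exactly when at least one operand is -0.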
   2085       __ vneg(left_reg, left_reg);
   2086       __ vsub(result_reg, left_reg, right_reg);
   2087       __ vneg(result_reg, result_reg);
   2088     } else {
   2089       // Since we operate on +0 and/or -0, vadd and vand have the same effect;
   2090       // the decision for vadd is easy because vand is a NEON instruction.
   2091       __ vadd(result_reg, left_reg, right_reg);
   2092     }
   2093     __ b(&done);
   2094 
   2095     __ bind(&result_is_nan);
   2096     __ vadd(result_reg, left_reg, right_reg);
   2097     __ b(&done);
   2098 
   2099     __ bind(&return_right);
   2100     __ Move(result_reg, right_reg);
   2101     if (!left_reg.is(result_reg)) {
   2102       __ b(&done);
   2103     }
   2104 
   2105     __ bind(&return_left);
   2106     __ Move(result_reg, left_reg);
   2107 
   2108     __ bind(&done);
   2109   }
   2110 }
   2111 
   2112 
   2113 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   2114   DwVfpRegister left = ToDoubleRegister(instr->left());
   2115   DwVfpRegister right = ToDoubleRegister(instr->right());
   2116   DwVfpRegister result = ToDoubleRegister(instr->result());
   2117   switch (instr->op()) {
   2118     case Token::ADD:
   2119       __ vadd(result, left, right);
   2120       break;
   2121     case Token::SUB:
   2122       __ vsub(result, left, right);
   2123       break;
   2124     case Token::MUL:
   2125       __ vmul(result, left, right);
   2126       break;
   2127     case Token::DIV:
   2128       __ vdiv(result, left, right);
   2129       break;
   2130     case Token::MOD: {
   2131       __ PrepareCallCFunction(0, 2, scratch0());
   2132       __ MovToFloatParameters(left, right);
   2133       __ CallCFunction(
   2134           ExternalReference::mod_two_doubles_operation(isolate()),
   2135           0, 2);
   2136       // Move the result in the double result register.
   2137       __ MovFromFloatResult(result);
   2138       break;
   2139     }
   2140     default:
   2141       UNREACHABLE();
   2142       break;
   2143   }
   2144 }
   2145 
   2146 
   2147 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   2148   ASSERT(ToRegister(instr->context()).is(cp));
   2149   ASSERT(ToRegister(instr->left()).is(r1));
   2150   ASSERT(ToRegister(instr->right()).is(r0));
   2151   ASSERT(ToRegister(instr->result()).is(r0));
   2152 
   2153   BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
   2154   // Block literal pool emission to ensure nop indicating no inlined smi code
   2155   // is in the correct position.
   2156   Assembler::BlockConstPoolScope block_const_pool(masm());
   2157   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2158 }
   2159 
   2160 
   2161 template<class InstrType>
   2162 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
   2163   int left_block = instr->TrueDestination(chunk_);
   2164   int right_block = instr->FalseDestination(chunk_);
   2165 
   2166   int next_block = GetNextEmittedBlock();
   2167 
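           // Fall through whenever the true or false block is the next one
           // emitted; only the final case needs both a conditional and an
           // unconditional branch.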
   2168   if (right_block == left_block || condition == al) {
   2169     EmitGoto(left_block);
   2170   } else if (left_block == next_block) {
   2171     __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
   2172   } else if (right_block == next_block) {
   2173     __ b(condition, chunk_->GetAssemblyLabel(left_block));
   2174   } else {
   2175     __ b(condition, chunk_->GetAssemblyLabel(left_block));
   2176     __ b(chunk_->GetAssemblyLabel(right_block));
   2177   }
   2178 }
   2179 
   2180 
   2181 template<class InstrType>
   2182 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
   2183   int false_block = instr->FalseDestination(chunk_);
   2184   __ b(condition, chunk_->GetAssemblyLabel(false_block));
   2185 }
   2186 
   2187 
   2188 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   2189   __ stop("LBreak");
   2190 }
   2191 
   2192 
   2193 void LCodeGen::DoBranch(LBranch* instr) {
   2194   Representation r = instr->hydrogen()->value()->representation();
   2195   if (r.IsInteger32() || r.IsSmi()) {
   2196     ASSERT(!info()->IsStub());
   2197     Register reg = ToRegister(instr->value());
   2198     __ cmp(reg, Operand::Zero());
   2199     EmitBranch(instr, ne);
   2200   } else if (r.IsDouble()) {
   2201     ASSERT(!info()->IsStub());
   2202     DwVfpRegister reg = ToDoubleRegister(instr->value());
   2203     // Test the double value. Zero and NaN are false.
   2204     __ VFPCompareAndSetFlags(reg, 0.0);
   2205     __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
   2206     EmitBranch(instr, ne);
   2207   } else {
   2208     ASSERT(r.IsTagged());
   2209     Register reg = ToRegister(instr->value());
   2210     HType type = instr->hydrogen()->value()->type();
   2211     if (type.IsBoolean()) {
   2212       ASSERT(!info()->IsStub());
   2213       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2214       EmitBranch(instr, eq);
   2215     } else if (type.IsSmi()) {
   2216       ASSERT(!info()->IsStub());
   2217       __ cmp(reg, Operand::Zero());
   2218       EmitBranch(instr, ne);
   2219     } else if (type.IsJSArray()) {
   2220       ASSERT(!info()->IsStub());
   2221       EmitBranch(instr, al);
   2222     } else if (type.IsHeapNumber()) {
   2223       ASSERT(!info()->IsStub());
   2224       DwVfpRegister dbl_scratch = double_scratch0();
   2225       __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2226       // Test the double value. Zero and NaN are false.
   2227       __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
    2228       __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
   2229       EmitBranch(instr, ne);
   2230     } else if (type.IsString()) {
   2231       ASSERT(!info()->IsStub());
   2232       __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
   2233       __ cmp(ip, Operand::Zero());
   2234       EmitBranch(instr, ne);
   2235     } else {
   2236       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
   2237       // Avoid deopts in the case where we've never executed this path before.
   2238       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
   2239 
   2240       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
   2241         // undefined -> false.
   2242         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
   2243         __ b(eq, instr->FalseLabel(chunk_));
   2244       }
   2245       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
   2246         // Boolean -> its value.
   2247         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2248         __ b(eq, instr->TrueLabel(chunk_));
   2249         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
   2250         __ b(eq, instr->FalseLabel(chunk_));
   2251       }
   2252       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
   2253         // 'null' -> false.
   2254         __ CompareRoot(reg, Heap::kNullValueRootIndex);
   2255         __ b(eq, instr->FalseLabel(chunk_));
   2256       }
   2257 
   2258       if (expected.Contains(ToBooleanStub::SMI)) {
   2259         // Smis: 0 -> false, all other -> true.
   2260         __ cmp(reg, Operand::Zero());
   2261         __ b(eq, instr->FalseLabel(chunk_));
   2262         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2263       } else if (expected.NeedsMap()) {
   2264         // If we need a map later and have a Smi -> deopt.
   2265         __ SmiTst(reg);
   2266         DeoptimizeIf(eq, instr->environment());
   2267       }
   2268 
   2269       const Register map = scratch0();
   2270       if (expected.NeedsMap()) {
   2271         __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
   2272 
   2273         if (expected.CanBeUndetectable()) {
   2274           // Undetectable -> false.
   2275           __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
   2276           __ tst(ip, Operand(1 << Map::kIsUndetectable));
   2277           __ b(ne, instr->FalseLabel(chunk_));
   2278         }
   2279       }
   2280 
   2281       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
   2282         // spec object -> true.
   2283         __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
   2284         __ b(ge, instr->TrueLabel(chunk_));
   2285       }
   2286 
   2287       if (expected.Contains(ToBooleanStub::STRING)) {
   2288         // String value -> false iff empty.
   2289         Label not_string;
   2290         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
   2291         __ b(ge, &not_string);
   2292         __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
   2293         __ cmp(ip, Operand::Zero());
   2294         __ b(ne, instr->TrueLabel(chunk_));
   2295         __ b(instr->FalseLabel(chunk_));
   2296         __ bind(&not_string);
   2297       }
   2298 
   2299       if (expected.Contains(ToBooleanStub::SYMBOL)) {
   2300         // Symbol value -> true.
   2301         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
   2302         __ b(eq, instr->TrueLabel(chunk_));
   2303       }
   2304 
   2305       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
   2306         // heap number -> false iff +0, -0, or NaN.
   2307         DwVfpRegister dbl_scratch = double_scratch0();
   2308         Label not_heap_number;
   2309         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
   2310         __ b(ne, &not_heap_number);
   2311         __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
   2312         __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
   2313         __ cmp(r0, r0, vs);  // NaN -> false.
   2314         __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
   2315         __ b(instr->TrueLabel(chunk_));
   2316         __ bind(&not_heap_number);
   2317       }
   2318 
   2319       if (!expected.IsGeneric()) {
   2320         // We've seen something for the first time -> deopt.
   2321         // This can only happen if we are not generic already.
   2322         DeoptimizeIf(al, instr->environment());
   2323       }
   2324     }
   2325   }
   2326 }
   2327 
   2328 
   2329 void LCodeGen::EmitGoto(int block) {
   2330   if (!IsNextEmittedBlock(block)) {
   2331     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2332   }
   2333 }
   2334 
   2335 
   2336 void LCodeGen::DoGoto(LGoto* instr) {
   2337   EmitGoto(instr->block_id());
   2338 }
   2339 
   2340 
   2341 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2342   Condition cond = kNoCondition;
   2343   switch (op) {
   2344     case Token::EQ:
   2345     case Token::EQ_STRICT:
   2346       cond = eq;
   2347       break;
   2348     case Token::NE:
   2349     case Token::NE_STRICT:
   2350       cond = ne;
   2351       break;
   2352     case Token::LT:
   2353       cond = is_unsigned ? lo : lt;
   2354       break;
   2355     case Token::GT:
   2356       cond = is_unsigned ? hi : gt;
   2357       break;
   2358     case Token::LTE:
   2359       cond = is_unsigned ? ls : le;
   2360       break;
   2361     case Token::GTE:
   2362       cond = is_unsigned ? hs : ge;
   2363       break;
   2364     case Token::IN:
   2365     case Token::INSTANCEOF:
   2366     default:
   2367       UNREACHABLE();
   2368   }
   2369   return cond;
   2370 }
   2371 
   2372 
   2373 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2374   LOperand* left = instr->left();
   2375   LOperand* right = instr->right();
   2376   bool is_unsigned =
   2377       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2378       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2379   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2380 
   2381   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2382     // We can statically evaluate the comparison.
   2383     double left_val = ToDouble(LConstantOperand::cast(left));
   2384     double right_val = ToDouble(LConstantOperand::cast(right));
   2385     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
   2386         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
   2387     EmitGoto(next_block);
   2388   } else {
   2389     if (instr->is_double()) {
   2390       // Compare left and right operands as doubles and load the
   2391       // resulting flags into the normal status register.
   2392       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
   2393       // If a NaN is involved, i.e. the result is unordered (V set),
   2394       // jump to false block label.
   2395       __ b(vs, instr->FalseLabel(chunk_));
   2396     } else {
   2397       if (right->IsConstantOperand()) {
   2398         int32_t value = ToInteger32(LConstantOperand::cast(right));
   2399         if (instr->hydrogen_value()->representation().IsSmi()) {
   2400           __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
   2401         } else {
   2402           __ cmp(ToRegister(left), Operand(value));
   2403         }
   2404       } else if (left->IsConstantOperand()) {
   2405         int32_t value = ToInteger32(LConstantOperand::cast(left));
   2406         if (instr->hydrogen_value()->representation().IsSmi()) {
   2407           __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
   2408         } else {
   2409           __ cmp(ToRegister(right), Operand(value));
   2410         }
   2411         // We commuted the operands, so commute the condition.
   2412         cond = CommuteCondition(cond);
   2413       } else {
   2414         __ cmp(ToRegister(left), ToRegister(right));
   2415       }
   2416     }
   2417     EmitBranch(instr, cond);
   2418   }
   2419 }
   2420 
   2421 
   2422 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2423   Register left = ToRegister(instr->left());
   2424   Register right = ToRegister(instr->right());
   2425 
   2426   __ cmp(left, Operand(right));
   2427   EmitBranch(instr, eq);
   2428 }
   2429 
   2430 
   2431 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2432   if (instr->hydrogen()->representation().IsTagged()) {
   2433     Register input_reg = ToRegister(instr->object());
   2434     __ mov(ip, Operand(factory()->the_hole_value()));
   2435     __ cmp(input_reg, ip);
   2436     EmitBranch(instr, eq);
   2437     return;
   2438   }
   2439 
   2440   DwVfpRegister input_reg = ToDoubleRegister(instr->object());
   2441   __ VFPCompareAndSetFlags(input_reg, input_reg);
   2442   EmitFalseBranch(instr, vc);
   2443 
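           // The value is some NaN (the self-compare above is unordered only
           // for NaNs); the hole is a NaN with a distinguished upper word, so
           // compare that word against kHoleNanUpper32.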
   2444   Register scratch = scratch0();
   2445   __ VmovHigh(scratch, input_reg);
   2446   __ cmp(scratch, Operand(kHoleNanUpper32));
   2447   EmitBranch(instr, eq);
   2448 }
   2449 
   2450 
   2451 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   2452   Representation rep = instr->hydrogen()->value()->representation();
   2453   ASSERT(!rep.IsInteger32());
   2454   Register scratch = ToRegister(instr->temp());
   2455 
   2456   if (rep.IsDouble()) {
   2457     DwVfpRegister value = ToDoubleRegister(instr->value());
   2458     __ VFPCompareAndSetFlags(value, 0.0);
   2459     EmitFalseBranch(instr, ne);
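             // The value compares equal to 0.0, so it is +/-0; -0 is the bit
             // pattern with only the sign bit set in the upper word.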
   2460     __ VmovHigh(scratch, value);
   2461     __ cmp(scratch, Operand(0x80000000));
   2462   } else {
   2463     Register value = ToRegister(instr->value());
   2464     __ CheckMap(value,
   2465                 scratch,
   2466                 Heap::kHeapNumberMapRootIndex,
   2467                 instr->FalseLabel(chunk()),
   2468                 DO_SMI_CHECK);
   2469     __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
   2470     __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
   2471     __ cmp(scratch, Operand(0x80000000));
   2472     __ cmp(ip, Operand(0x00000000), eq);
   2473   }
   2474   EmitBranch(instr, eq);
   2475 }
   2476 
   2477 
   2478 Condition LCodeGen::EmitIsObject(Register input,
   2479                                  Register temp1,
   2480                                  Label* is_not_object,
   2481                                  Label* is_object) {
   2482   Register temp2 = scratch0();
   2483   __ JumpIfSmi(input, is_not_object);
   2484 
   2485   __ LoadRoot(temp2, Heap::kNullValueRootIndex);
   2486   __ cmp(input, temp2);
   2487   __ b(eq, is_object);
   2488 
   2489   // Load map.
   2490   __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
   2491   // Undetectable objects behave like undefined.
   2492   __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
   2493   __ tst(temp2, Operand(1 << Map::kIsUndetectable));
   2494   __ b(ne, is_not_object);
   2495 
   2496   // Load instance type and check that it is in object type range.
   2497   __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
   2498   __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2499   __ b(lt, is_not_object);
   2500   __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2501   return le;
   2502 }
   2503 
   2504 
   2505 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   2506   Register reg = ToRegister(instr->value());
   2507   Register temp1 = ToRegister(instr->temp());
   2508 
   2509   Condition true_cond =
   2510       EmitIsObject(reg, temp1,
   2511           instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
   2512 
   2513   EmitBranch(instr, true_cond);
   2514 }
   2515 
   2516 
   2517 Condition LCodeGen::EmitIsString(Register input,
   2518                                  Register temp1,
   2519                                  Label* is_not_string,
   2520                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2521   if (check_needed == INLINE_SMI_CHECK) {
   2522     __ JumpIfSmi(input, is_not_string);
   2523   }
   2524   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
   2525 
   2526   return lt;
   2527 }
   2528 
   2529 
   2530 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2531   Register reg = ToRegister(instr->value());
   2532   Register temp1 = ToRegister(instr->temp());
   2533 
   2534   SmiCheck check_needed =
   2535       instr->hydrogen()->value()->type().IsHeapObject()
   2536           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2537   Condition true_cond =
   2538       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
   2539 
   2540   EmitBranch(instr, true_cond);
   2541 }
   2542 
   2543 
   2544 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2545   Register input_reg = EmitLoadRegister(instr->value(), ip);
   2546   __ SmiTst(input_reg);
   2547   EmitBranch(instr, eq);
   2548 }
   2549 
   2550 
   2551 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2552   Register input = ToRegister(instr->value());
   2553   Register temp = ToRegister(instr->temp());
   2554 
   2555   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2556     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2557   }
   2558   __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2559   __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2560   __ tst(temp, Operand(1 << Map::kIsUndetectable));
   2561   EmitBranch(instr, ne);
   2562 }
   2563 
   2564 
   2565 static Condition ComputeCompareCondition(Token::Value op) {
   2566   switch (op) {
   2567     case Token::EQ_STRICT:
   2568     case Token::EQ:
   2569       return eq;
   2570     case Token::LT:
   2571       return lt;
   2572     case Token::GT:
   2573       return gt;
   2574     case Token::LTE:
   2575       return le;
   2576     case Token::GTE:
   2577       return ge;
   2578     default:
   2579       UNREACHABLE();
   2580       return kNoCondition;
   2581   }
   2582 }
   2583 
   2584 
   2585 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2586   ASSERT(ToRegister(instr->context()).is(cp));
   2587   Token::Value op = instr->op();
   2588 
   2589   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2590   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2591   // This instruction also signals no smi code inlined.
   2592   __ cmp(r0, Operand::Zero());
   2593 
   2594   Condition condition = ComputeCompareCondition(op);
   2595 
   2596   EmitBranch(instr, condition);
   2597 }
   2598 
   2599 
   2600 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2601   InstanceType from = instr->from();
   2602   InstanceType to = instr->to();
   2603   if (from == FIRST_TYPE) return to;
   2604   ASSERT(from == to || to == LAST_TYPE);
   2605   return from;
   2606 }
   2607 
   2608 
   2609 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2610   InstanceType from = instr->from();
   2611   InstanceType to = instr->to();
   2612   if (from == to) return eq;
   2613   if (to == LAST_TYPE) return hs;
   2614   if (from == FIRST_TYPE) return ls;
   2615   UNREACHABLE();
   2616   return eq;
   2617 }
   2618 
   2619 
   2620 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2621   Register scratch = scratch0();
   2622   Register input = ToRegister(instr->value());
   2623 
   2624   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2625     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2626   }
   2627 
   2628   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
   2629   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2630 }
   2631 
   2632 
   2633 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
   2634   Register input = ToRegister(instr->value());
   2635   Register result = ToRegister(instr->result());
   2636 
   2637   __ AssertString(input);
   2638 
   2639   __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
   2640   __ IndexFromHash(result, result);
   2641 }
   2642 
   2643 
   2644 void LCodeGen::DoHasCachedArrayIndexAndBranch(
   2645     LHasCachedArrayIndexAndBranch* instr) {
   2646   Register input = ToRegister(instr->value());
   2647   Register scratch = scratch0();
   2648 
   2649   __ ldr(scratch,
   2650          FieldMemOperand(input, String::kHashFieldOffset));
   2651   __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
   2652   EmitBranch(instr, eq);
   2653 }
   2654 
   2655 
   2656 // Branches to a label or falls through with the answer in flags.  Trashes
   2657 // the temp registers, but not the input.
   2658 void LCodeGen::EmitClassOfTest(Label* is_true,
   2659                                Label* is_false,
    2660                                Handle<String> class_name,
   2661                                Register input,
   2662                                Register temp,
   2663                                Register temp2) {
   2664   ASSERT(!input.is(temp));
   2665   ASSERT(!input.is(temp2));
   2666   ASSERT(!temp.is(temp2));
   2667 
   2668   __ JumpIfSmi(input, is_false);
   2669 
   2670   if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
   2671     // Assuming the following assertions, we can use the same compares to test
   2672     // for both being a function type and being in the object type range.
   2673     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   2674     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2675                   FIRST_SPEC_OBJECT_TYPE + 1);
   2676     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2677                   LAST_SPEC_OBJECT_TYPE - 1);
   2678     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
   2679     __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
   2680     __ b(lt, is_false);
   2681     __ b(eq, is_true);
   2682     __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
   2683     __ b(eq, is_true);
   2684   } else {
   2685     // Faster code path to avoid two compares: subtract lower bound from the
   2686     // actual type and do a signed compare with the width of the type range.
   2687     __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2688     __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
   2689     __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2690     __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
   2691                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2692     __ b(gt, is_false);
   2693   }
   2694 
   2695   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   2696   // Check if the constructor in the map is a function.
   2697   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
   2698 
   2699   // Objects with a non-function constructor have class 'Object'.
   2700   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
   2701   if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
   2702     __ b(ne, is_true);
   2703   } else {
   2704     __ b(ne, is_false);
   2705   }
   2706 
   2707   // temp now contains the constructor function. Grab the
   2708   // instance class name from there.
   2709   __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2710   __ ldr(temp, FieldMemOperand(temp,
   2711                                SharedFunctionInfo::kInstanceClassNameOffset));
   2712   // The class name we are testing against is internalized since it's a literal.
   2713   // The name in the constructor is internalized because of the way the context
   2714   // is booted.  This routine isn't expected to work for random API-created
   2715   // classes and it doesn't have to because you can't access it with natives
   2716   // syntax.  Since both sides are internalized it is sufficient to use an
   2717   // identity comparison.
   2718   __ cmp(temp, Operand(class_name));
   2719   // End with the answer in flags.
   2720 }
   2721 
   2722 
   2723 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2724   Register input = ToRegister(instr->value());
   2725   Register temp = scratch0();
   2726   Register temp2 = ToRegister(instr->temp());
   2727   Handle<String> class_name = instr->hydrogen()->class_name();
   2728 
   2729   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2730       class_name, input, temp, temp2);
   2731 
   2732   EmitBranch(instr, eq);
   2733 }
   2734 
   2735 
   2736 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2737   Register reg = ToRegister(instr->value());
   2738   Register temp = ToRegister(instr->temp());
   2739 
   2740   __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   2741   __ cmp(temp, Operand(instr->map()));
   2742   EmitBranch(instr, eq);
   2743 }
   2744 
   2745 
   2746 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   2747   ASSERT(ToRegister(instr->context()).is(cp));
   2748   ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
   2749   ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.
   2750 
   2751   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   2752   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2753 
   2754   __ cmp(r0, Operand::Zero());
   2755   __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
   2756   __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
   2757 }
   2758 
   2759 
   2760 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   2761   class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   2762    public:
   2763     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
   2764                                   LInstanceOfKnownGlobal* instr)
   2765         : LDeferredCode(codegen), instr_(instr) { }
   2766     virtual void Generate() V8_OVERRIDE {
   2767       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
   2768     }
   2769     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   2770     Label* map_check() { return &map_check_; }
   2771    private:
   2772     LInstanceOfKnownGlobal* instr_;
   2773     Label map_check_;
   2774   };
   2775 
   2776   DeferredInstanceOfKnownGlobal* deferred;
   2777   deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
   2778 
   2779   Label done, false_result;
   2780   Register object = ToRegister(instr->value());
   2781   Register temp = ToRegister(instr->temp());
   2782   Register result = ToRegister(instr->result());
   2783 
   2784   // A Smi is not instance of anything.
   2785   __ JumpIfSmi(object, &false_result);
   2786 
   2787   // This is the inlined call site instanceof cache. The two occurrences
   2788   // of the hole value will be patched to the last map/result pair
   2789   // generated by the instanceof stub.
   2790   Label cache_miss;
   2791   Register map = temp;
   2792   __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   2793   {
   2794     // Block constant pool emission to ensure the positions of instructions are
   2795     // as expected by the patcher. See InstanceofStub::Generate().
   2796     Assembler::BlockConstPoolScope block_const_pool(masm());
   2797     __ bind(deferred->map_check());  // Label for calculating code patching.
   2798     // We use Factory::the_hole_value() on purpose instead of loading from
   2799     // the root array to force relocation, so that the cached map can be
   2800     // patched in later.
   2801     PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
   2802     Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
   2803     __ mov(ip, Operand(Handle<Object>(cell)));
   2804     __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
   2805     __ cmp(map, Operand(ip));
   2806     __ b(ne, &cache_miss);
   2807     // We use Factory::the_hole_value() on purpose instead of loading from
   2808     // the root array to force relocation, so that true or false can be
   2809     // patched in later.
   2810     __ mov(result, Operand(factory()->the_hole_value()));
   2811   }
   2812   __ b(&done);
   2813 
   2814   // The inlined call site cache did not match. Check null and string before
   2815   // calling the deferred code.
   2816   __ bind(&cache_miss);
   2817   // Null is not instance of anything.
   2818   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   2819   __ cmp(object, Operand(ip));
   2820   __ b(eq, &false_result);
   2821 
   2822   // String values are not instances of anything.
   2823   Condition is_string = masm_->IsObjectStringType(object, temp);
   2824   __ b(is_string, &false_result);
   2825 
   2826   // Go to the deferred code.
   2827   __ b(deferred->entry());
   2828 
   2829   __ bind(&false_result);
   2830   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   2831 
   2832   // Here result has either true or false. Deferred code also produces true or
   2833   // false object.
   2834   __ bind(deferred->exit());
   2835   __ bind(&done);
   2836 }
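
        // A simplified sketch of the inline cache protocol above: on the
        // first execution the map compare fails (the cell still holds the
        // hole), control reaches cache_miss and then the deferred stub call,
        // and the stub patches the two hole-value movs with the object's map
        // and the computed result. Later checks on an object with the same
        // map then load the patched true/false directly, skipping the stub.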
   2837 
   2838 
   2839 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   2840                                                Label* map_check) {
   2841   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   2842   flags = static_cast<InstanceofStub::Flags>(
   2843       flags | InstanceofStub::kArgsInRegisters);
   2844   flags = static_cast<InstanceofStub::Flags>(
   2845       flags | InstanceofStub::kCallSiteInlineCheck);
   2846   flags = static_cast<InstanceofStub::Flags>(
   2847       flags | InstanceofStub::kReturnTrueFalseObject);
   2848   InstanceofStub stub(isolate(), flags);
   2849 
   2850   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   2851   LoadContextFromDeferred(instr->context());
   2852 
   2853   __ Move(InstanceofStub::right(), instr->function());
   2854   static const int kAdditionalDelta = 4;
   2855   // Make sure that the code size is predictable, since we use specific
   2856   // constant offsets in the code to find embedded values.
   2857   PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
   2858   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   2859   Label before_push_delta;
   2860   __ bind(&before_push_delta);
   2861   __ BlockConstPoolFor(kAdditionalDelta);
   2862   // r5 is used to communicate the offset to the location of the map check.
   2863   __ mov(r5, Operand(delta * kPointerSize));
   2864   // The mov above can generate one or two instructions. The delta was computed
   2865   // for two instructions, so we need to pad here in case of one instruction.
   2866   if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
   2867     ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
   2868     __ nop();
   2869   }
   2870   CallCodeGeneric(stub.GetCode(),
   2871                   RelocInfo::CODE_TARGET,
   2872                   instr,
   2873                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   2874   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   2875   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2876   // Put the result value (r0) into the result register slot and
   2877   // restore all registers.
   2878   __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
   2879 }
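
        // A worked example of the delta computation above (instruction count
        // is illustrative): if 9 instructions were generated since map_check,
        // delta = 9 + kAdditionalDelta = 13 and r5 receives 13 * kPointerSize
        // = 52 bytes. Roughly, the stub uses this offset relative to the
        // return address to locate the patchable mov sequence at the map
        // check site, which is why the code size in between must be exact.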
   2880 
   2881 
   2882 void LCodeGen::DoCmpT(LCmpT* instr) {
   2883   ASSERT(ToRegister(instr->context()).is(cp));
   2884   Token::Value op = instr->op();
   2885 
   2886   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2887   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2888   // This instruction also signals no smi code inlined.
   2889   __ cmp(r0, Operand::Zero());
   2890 
   2891   Condition condition = ComputeCompareCondition(op);
   2892   __ LoadRoot(ToRegister(instr->result()),
   2893               Heap::kTrueValueRootIndex,
   2894               condition);
   2895   __ LoadRoot(ToRegister(instr->result()),
   2896               Heap::kFalseValueRootIndex,
   2897               NegateCondition(condition));
   2898 }
   2899 
   2900 
   2901 void LCodeGen::DoReturn(LReturn* instr) {
   2902   if (FLAG_trace && info()->IsOptimizing()) {
   2903     // Push the return value on the stack as the parameter.
   2904     // Runtime::TraceExit returns its parameter in r0.  Since we're leaving
   2905     // the code managed by the register allocator and tearing down the
   2906     // frame, it's safe to write to the context register.
   2907     __ push(r0);
   2908     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2909     __ CallRuntime(Runtime::kTraceExit, 1);
   2910   }
   2911   if (info()->saves_caller_doubles()) {
   2912     RestoreCallerDoubles();
   2913   }
   2914   int no_frame_start = -1;
   2915   if (NeedsEagerFrame()) {
   2916     no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
   2917   }
   2918   if (instr->has_constant_parameter_count()) {
   2919     int parameter_count = ToInteger32(instr->constant_parameter_count());
   2920     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
   2921     if (sp_delta != 0) {
   2922       __ add(sp, sp, Operand(sp_delta));
   2923     }
   2924   } else {
   2925     Register reg = ToRegister(instr->parameter_count());
   2926     // The argument count parameter is a smi.
   2927     __ SmiUntag(reg);
   2928     __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
   2929   }
   2930 
   2931   __ Jump(lr);
   2932 
   2933   if (no_frame_start != -1) {
   2934     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   2935   }
   2936 }
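
        // A quick arithmetic check of the constant-count case above: with
        // 3 declared parameters, sp_delta = (3 + 1) * kPointerSize = 16 on
        // ARM (kPointerSize == 4), popping the three arguments plus the
        // receiver with a single add to sp.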
   2937 
   2938 
   2939 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   2940   Register result = ToRegister(instr->result());
   2941   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   2942   __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
   2943   if (instr->hydrogen()->RequiresHoleCheck()) {
   2944     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2945     __ cmp(result, ip);
   2946     DeoptimizeIf(eq, instr->environment());
   2947   }
   2948 }
   2949 
   2950 
   2951 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   2952   ASSERT(ToRegister(instr->context()).is(cp));
   2953   ASSERT(ToRegister(instr->global_object()).is(r0));
   2954   ASSERT(ToRegister(instr->result()).is(r0));
   2955 
   2956   __ mov(r2, Operand(instr->name()));
   2957   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
   2958   Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
   2959   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2960 }
   2961 
   2962 
   2963 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   2964   Register value = ToRegister(instr->value());
   2965   Register cell = scratch0();
   2966 
   2967   // Load the cell.
   2968   __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
   2969 
   2970   // If the cell we are storing to contains the hole it could have
   2971   // been deleted from the property dictionary. In that case, we need
   2972   // to update the property details in the property dictionary to mark
   2973   // it as no longer deleted.
   2974   if (instr->hydrogen()->RequiresHoleCheck()) {
   2975     // We use a temp to check the payload (CompareRoot might clobber ip).
   2976     Register payload = ToRegister(instr->temp());
   2977     __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
   2978     __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
   2979     DeoptimizeIf(eq, instr->environment());
   2980   }
   2981 
   2982   // Store the value.
   2983   __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
   2984   // Cells are always rescanned, so no write barrier here.
   2985 }
   2986 
   2987 
   2988 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2989   Register context = ToRegister(instr->context());
   2990   Register result = ToRegister(instr->result());
   2991   __ ldr(result, ContextOperand(context, instr->slot_index()));
   2992   if (instr->hydrogen()->RequiresHoleCheck()) {
   2993     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2994     __ cmp(result, ip);
   2995     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2996       DeoptimizeIf(eq, instr->environment());
   2997     } else {
   2998       __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
   2999     }
   3000   }
   3001 }
   3002 
   3003 
   3004 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   3005   Register context = ToRegister(instr->context());
   3006   Register value = ToRegister(instr->value());
   3007   Register scratch = scratch0();
   3008   MemOperand target = ContextOperand(context, instr->slot_index());
   3009 
   3010   Label skip_assignment;
   3011 
   3012   if (instr->hydrogen()->RequiresHoleCheck()) {
   3013     __ ldr(scratch, target);
   3014     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   3015     __ cmp(scratch, ip);
   3016     if (instr->hydrogen()->DeoptimizesOnHole()) {
   3017       DeoptimizeIf(eq, instr->environment());
   3018     } else {
   3019       __ b(ne, &skip_assignment);
   3020     }
   3021   }
   3022 
   3023   __ str(value, target);
   3024   if (instr->hydrogen()->NeedsWriteBarrier()) {
   3025     SmiCheck check_needed =
   3026         instr->hydrogen()->value()->type().IsHeapObject()
   3027             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   3028     __ RecordWriteContextSlot(context,
   3029                               target.offset(),
   3030                               value,
   3031                               scratch,
   3032                               GetLinkRegisterState(),
   3033                               kSaveFPRegs,
   3034                               EMIT_REMEMBERED_SET,
   3035                               check_needed);
   3036   }
   3037 
   3038   __ bind(&skip_assignment);
   3039 }
   3040 
   3041 
   3042 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   3043   HObjectAccess access = instr->hydrogen()->access();
   3044   int offset = access.offset();
   3045   Register object = ToRegister(instr->object());
   3046 
   3047   if (access.IsExternalMemory()) {
   3048     Register result = ToRegister(instr->result());
   3049     MemOperand operand = MemOperand(object, offset);
   3050     __ Load(result, operand, access.representation());
   3051     return;
   3052   }
   3053 
   3054   if (instr->hydrogen()->representation().IsDouble()) {
   3055     DwVfpRegister result = ToDoubleRegister(instr->result());
   3056     __ vldr(result, FieldMemOperand(object, offset));
   3057     return;
   3058   }
   3059 
   3060   Register result = ToRegister(instr->result());
   3061   if (!access.IsInobject()) {
   3062     __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3063     object = result;
   3064   }
   3065   MemOperand operand = FieldMemOperand(object, offset);
   3066   __ Load(result, operand, access.representation());
   3067 }
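
        // A note on the two load paths above: in-object fields live at a
        // fixed offset inside the object itself, while out-of-object fields
        // take one extra indirection through the properties backing store at
        // JSObject::kPropertiesOffset before the same offset-based load is
        // applied.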
   3068 
   3069 
   3070 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   3071   ASSERT(ToRegister(instr->context()).is(cp));
   3072   ASSERT(ToRegister(instr->object()).is(r0));
   3073   ASSERT(ToRegister(instr->result()).is(r0));
   3074 
   3075   // Name is always in r2.
   3076   __ mov(r2, Operand(instr->name()));
   3077   Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
   3078   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   3079 }
   3080 
   3081 
   3082 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   3083   Register scratch = scratch0();
   3084   Register function = ToRegister(instr->function());
   3085   Register result = ToRegister(instr->result());
   3086 
   3087   // Check that the function really is a function. Load map into the
   3088   // result register.
   3089   __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
   3090   DeoptimizeIf(ne, instr->environment());
   3091 
   3092   // Make sure that the function has an instance prototype.
   3093   Label non_instance;
   3094   __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
   3095   __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
   3096   __ b(ne, &non_instance);
   3097 
   3098   // Get the prototype or initial map from the function.
   3099   __ ldr(result,
   3100          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   3101 
   3102   // Check that the function has a prototype or an initial map.
   3103   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   3104   __ cmp(result, ip);
   3105   DeoptimizeIf(eq, instr->environment());
   3106 
   3107   // If the function does not have an initial map, we're done.
   3108   Label done;
   3109   __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
   3110   __ b(ne, &done);
   3111 
   3112   // Get the prototype from the initial map.
   3113   __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   3114   __ jmp(&done);
   3115 
   3116   // Non-instance prototype: Fetch prototype from constructor field
   3117   // in initial map.
   3118   __ bind(&non_instance);
   3119   __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
   3120 
   3121   // All done.
   3122   __ bind(&done);
   3123 }
   3124 
   3125 
   3126 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   3127   Register result = ToRegister(instr->result());
   3128   __ LoadRoot(result, instr->index());
   3129 }
   3130 
   3131 
   3132 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   3133   Register arguments = ToRegister(instr->arguments());
   3134   Register result = ToRegister(instr->result());
   3135   // There are two words between the frame pointer and the last argument.
   3136   // Subtracting the index from the length accounts for one of them;
          // adding one more accounts for the other.
   3137   if (instr->length()->IsConstantOperand()) {
   3138     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   3139     if (instr->index()->IsConstantOperand()) {
   3140       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3141       int index = (const_length - const_index) + 1;
   3142       __ ldr(result, MemOperand(arguments, index * kPointerSize));
   3143     } else {
   3144       Register index = ToRegister(instr->index());
   3145       __ rsb(result, index, Operand(const_length + 1));
   3146       __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
   3147     }
   3148   } else if (instr->index()->IsConstantOperand()) {
   3149     Register length = ToRegister(instr->length());
   3150     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3151     int loc = const_index - 1;
   3152     if (loc != 0) {
   3153       __ sub(result, length, Operand(loc));
   3154       __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
   3155     } else {
   3156       __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
   3157     }
   3158   } else {
   3159     Register length = ToRegister(instr->length());
   3160     Register index = ToRegister(instr->index());
   3161     __ sub(result, length, index);
   3162     __ add(result, result, Operand(1));
   3163     __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
   3164   }
   3165 }
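
        // A worked example of the constant/constant case above: with
        // const_length == 5 and const_index == 2, index = (5 - 2) + 1 = 4,
        // so the load reads MemOperand(arguments, 4 * kPointerSize); the
        // subtraction plus the extra 1 account for the two words noted above.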
   3166 
   3167 
   3168 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   3169   Register external_pointer = ToRegister(instr->elements());
   3170   Register key = no_reg;
   3171   ElementsKind elements_kind = instr->elements_kind();
   3172   bool key_is_constant = instr->key()->IsConstantOperand();
   3173   int constant_key = 0;
   3174   if (key_is_constant) {
   3175     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3176     if (constant_key & 0xF0000000) {
   3177       Abort(kArrayIndexConstantValueTooBig);
   3178     }
   3179   } else {
   3180     key = ToRegister(instr->key());
   3181   }
   3182   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3183   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3184       ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3185   int base_offset = instr->base_offset();
   3186 
   3187   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   3188       elements_kind == FLOAT32_ELEMENTS ||
   3189       elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
   3190       elements_kind == FLOAT64_ELEMENTS) {
   3192     DwVfpRegister result = ToDoubleRegister(instr->result());
   3193     Operand operand = key_is_constant
   3194         ? Operand(constant_key << element_size_shift)
   3195         : Operand(key, LSL, shift_size);
   3196     __ add(scratch0(), external_pointer, operand);
   3197     if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   3198         elements_kind == FLOAT32_ELEMENTS) {
   3199       __ vldr(double_scratch0().low(), scratch0(), base_offset);
   3200       __ vcvt_f64_f32(result, double_scratch0().low());
   3201     } else {  // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
   3202       __ vldr(result, scratch0(), base_offset);
   3203     }
   3204   } else {
   3205     Register result = ToRegister(instr->result());
   3206     MemOperand mem_operand = PrepareKeyedOperand(
   3207         key, external_pointer, key_is_constant, constant_key,
   3208         element_size_shift, shift_size, base_offset);
   3209     switch (elements_kind) {
   3210       case EXTERNAL_INT8_ELEMENTS:
   3211       case INT8_ELEMENTS:
   3212         __ ldrsb(result, mem_operand);
   3213         break;
   3214       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
   3215       case EXTERNAL_UINT8_ELEMENTS:
   3216       case UINT8_ELEMENTS:
   3217       case UINT8_CLAMPED_ELEMENTS:
   3218         __ ldrb(result, mem_operand);
   3219         break;
   3220       case EXTERNAL_INT16_ELEMENTS:
   3221       case INT16_ELEMENTS:
   3222         __ ldrsh(result, mem_operand);
   3223         break;
   3224       case EXTERNAL_UINT16_ELEMENTS:
   3225       case UINT16_ELEMENTS:
   3226         __ ldrh(result, mem_operand);
   3227         break;
   3228       case EXTERNAL_INT32_ELEMENTS:
   3229       case INT32_ELEMENTS:
   3230         __ ldr(result, mem_operand);
   3231         break;
   3232       case EXTERNAL_UINT32_ELEMENTS:
   3233       case UINT32_ELEMENTS:
   3234         __ ldr(result, mem_operand);
   3235         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3236           __ cmp(result, Operand(0x80000000));
   3237           DeoptimizeIf(cs, instr->environment());
   3238         }
   3239         break;
   3240       case FLOAT32_ELEMENTS:
   3241       case FLOAT64_ELEMENTS:
   3242       case EXTERNAL_FLOAT32_ELEMENTS:
   3243       case EXTERNAL_FLOAT64_ELEMENTS:
   3244       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3245       case FAST_HOLEY_ELEMENTS:
   3246       case FAST_HOLEY_SMI_ELEMENTS:
   3247       case FAST_DOUBLE_ELEMENTS:
   3248       case FAST_ELEMENTS:
   3249       case FAST_SMI_ELEMENTS:
   3250       case DICTIONARY_ELEMENTS:
   3251       case SLOPPY_ARGUMENTS_ELEMENTS:
   3252         UNREACHABLE();
   3253         break;
   3254     }
   3255   }
   3256 }
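
        // A note on the UINT32 deopt above: a loaded value with the top bit
        // set (>= 0x80000000) does not fit in a signed 32-bit integer, so
        // unless the instruction is explicitly marked kUint32 the code deopts
        // rather than reinterpret the value as a negative number.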
   3257 
   3258 
   3259 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   3260   Register elements = ToRegister(instr->elements());
   3261   bool key_is_constant = instr->key()->IsConstantOperand();
   3262   Register key = no_reg;
   3263   DwVfpRegister result = ToDoubleRegister(instr->result());
   3264   Register scratch = scratch0();
   3265 
   3266   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   3267 
   3268   int base_offset = instr->base_offset();
   3269   if (key_is_constant) {
   3270     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3271     if (constant_key & 0xF0000000) {
   3272       Abort(kArrayIndexConstantValueTooBig);
   3273     }
   3274     base_offset += constant_key * kDoubleSize;
   3275   }
   3276   __ add(scratch, elements, Operand(base_offset));
   3277 
   3278   if (!key_is_constant) {
   3279     key = ToRegister(instr->key());
   3280     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   3281         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   3282     __ add(scratch, scratch, Operand(key, LSL, shift_size));
   3283   }
   3284 
   3285   __ vldr(result, scratch, 0);
   3286 
   3287   if (instr->hydrogen()->RequiresHoleCheck()) {
   3288     __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
   3289     __ cmp(scratch, Operand(kHoleNanUpper32));
   3290     DeoptimizeIf(eq, instr->environment());
   3291   }
   3292 }
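
        // A note on the hole check above: the hole is encoded as a NaN with
        // a distinguished upper word, so only the upper 32 bits need to be
        // compared. Since sizeof(kHoleNanLower32) == 4, the ldr at that
        // offset skips the lower word of the just-loaded double (scratch
        // still points at it) and fetches the upper word for the compare.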
   3293 
   3294 
   3295 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   3296   Register elements = ToRegister(instr->elements());
   3297   Register result = ToRegister(instr->result());
   3298   Register scratch = scratch0();
   3299   Register store_base = scratch;
   3300   int offset = instr->base_offset();
   3301 
   3302   if (instr->key()->IsConstantOperand()) {
   3303     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3304     offset += ToInteger32(const_operand) * kPointerSize;
   3305     store_base = elements;
   3306   } else {
   3307     Register key = ToRegister(instr->key());
   3308     // Even though the HLoadKeyed instruction forces the input
   3309     // representation for the key to be an integer, the input gets replaced
   3310     // during bounds check elimination with the index argument to the bounds
   3311     // check, which can be tagged, so that case must be handled here, too.
   3312     if (instr->hydrogen()->key()->representation().IsSmi()) {
   3313       __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
   3314     } else {
   3315       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
   3316     }
   3317   }
   3318   __ ldr(result, MemOperand(store_base, offset));
   3319 
   3320   // Check for the hole value.
   3321   if (instr->hydrogen()->RequiresHoleCheck()) {
   3322     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3323       __ SmiTst(result);
   3324       DeoptimizeIf(ne, instr->environment());
   3325     } else {
   3326       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3327       __ cmp(result, scratch);
   3328       DeoptimizeIf(eq, instr->environment());
   3329     }
   3330   }
   3331 }
   3332 
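
        // A note on the two hole checks above: in a fast smi array every
        // valid element carries the smi tag, so SmiTst suffices and any
        // non-smi load (the hole included) deopts; for tagged elements the
        // hole is a specific oddball object, so it is compared against
        // the_hole_value directly.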
   3333 
   3334 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3335   if (instr->is_typed_elements()) {
   3336     DoLoadKeyedExternalArray(instr);
   3337   } else if (instr->hydrogen()->representation().IsDouble()) {
   3338     DoLoadKeyedFixedDoubleArray(instr);
   3339   } else {
   3340     DoLoadKeyedFixedArray(instr);
   3341   }
   3342 }
   3343 
   3344 
   3345 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   3346                                          Register base,
   3347                                          bool key_is_constant,
   3348                                          int constant_key,
   3349                                          int element_size,
   3350                                          int shift_size,
   3351                                          int base_offset) {
   3352   if (key_is_constant) {
   3353     return MemOperand(base, (constant_key << element_size) + base_offset);
   3354   }
   3355 
   3356   if (base_offset == 0) {
   3357     if (shift_size >= 0) {
   3358       return MemOperand(base, key, LSL, shift_size);
   3359     } else {
   3360       ASSERT_EQ(-1, shift_size);
   3361       return MemOperand(base, key, LSR, 1);
   3362     }
   3363   }
   3364 
   3365   if (shift_size >= 0) {
   3366     __ add(scratch0(), base, Operand(key, LSL, shift_size));
   3367     return MemOperand(scratch0(), base_offset);
   3368   } else {
   3369     ASSERT_EQ(-1, shift_size);
   3370     __ add(scratch0(), base, Operand(key, ASR, 1));
   3371     return MemOperand(scratch0(), base_offset);
   3372   }
   3373 }
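
        // A worked example of the operand preparation above (illustrative
        // values): a constant key 3 with element_size 2 (4-byte elements) and
        // base_offset 8 yields MemOperand(base, (3 << 2) + 8), i.e. offset
        // 20. A shift_size of -1 arises when a smi key indexes 1-byte
        // elements: the tagged key equals 2 * index, so shifting right by
        // one recovers the byte offset.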
   3374 
   3375 
   3376 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3377   ASSERT(ToRegister(instr->context()).is(cp));
   3378   ASSERT(ToRegister(instr->object()).is(r1));
   3379   ASSERT(ToRegister(instr->key()).is(r0));
   3380 
   3381   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
   3382   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   3383 }
   3384 
   3385 
   3386 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3387   Register scratch = scratch0();
   3388   Register result = ToRegister(instr->result());
   3389 
   3390   if (instr->hydrogen()->from_inlined()) {
   3391     __ sub(result, sp, Operand(2 * kPointerSize));
   3392   } else {
   3393     // Check if the calling frame is an arguments adaptor frame.
   3394     Label done, adapted;
   3395     __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3396     __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
   3397     __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3398 
   3399     // Result is the frame pointer for the frame if not adapted and for the real
   3400     // frame below the adaptor frame if adapted.
   3401     __ mov(result, fp, LeaveCC, ne);
   3402     __ mov(result, scratch, LeaveCC, eq);
   3403   }
   3404 }
   3405 
   3406 
   3407 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3408   Register elem = ToRegister(instr->elements());
   3409   Register result = ToRegister(instr->result());
   3410 
   3411   Label done;
   3412 
   3413   // If there is no arguments adaptor frame, the number of arguments is
          // fixed.
   3414   __ cmp(fp, elem);
   3415   __ mov(result, Operand(scope()->num_parameters()));
   3416   __ b(eq, &done);
   3417 
   3418   // Arguments adaptor frame present. Get argument length from there.
   3419   __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3420   __ ldr(result,
   3421          MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3422   __ SmiUntag(result);
   3423 
   3424   // Argument length is in result register.
   3425   __ bind(&done);
   3426 }
   3427 
   3428 
   3429 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3430   Register receiver = ToRegister(instr->receiver());
   3431   Register function = ToRegister(instr->function());
   3432   Register result = ToRegister(instr->result());
   3433   Register scratch = scratch0();
   3434 
   3435   // If the receiver is null or undefined, we have to pass the global
   3436   // object as a receiver to normal functions. Values have to be
   3437   // passed unchanged to builtins and strict-mode functions.
   3438   Label global_object, result_in_receiver;
   3439 
   3440   if (!instr->hydrogen()->known_function()) {
   3441     // Do not transform the receiver to object for strict mode
   3442     // functions.
   3443     __ ldr(scratch,
   3444            FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3445     __ ldr(scratch,
   3446            FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3447     int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
   3448     __ tst(scratch, Operand(mask));
   3449     __ b(ne, &result_in_receiver);
   3450 
   3451     // Do not transform the receiver to object for builtins.
   3452     __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
   3453     __ b(ne, &result_in_receiver);
   3454   }
   3455 
   3456   // Normal function. Replace undefined or null with global receiver.
   3457   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3458   __ cmp(receiver, scratch);
   3459   __ b(eq, &global_object);
   3460   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3461   __ cmp(receiver, scratch);
   3462   __ b(eq, &global_object);
   3463 
   3464   // Deoptimize if the receiver is not a JS object.
   3465   __ SmiTst(receiver);
   3466   DeoptimizeIf(eq, instr->environment());
   3467   __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
   3468   DeoptimizeIf(lt, instr->environment());
   3469 
   3470   __ b(&result_in_receiver);
   3471   __ bind(&global_object);
   3472   __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3473   __ ldr(result,
   3474          ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
   3475   __ ldr(result,
   3476          FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
   3477 
   3478   if (result.is(receiver)) {
   3479     __ bind(&result_in_receiver);
   3480   } else {
   3481     Label result_ok;
   3482     __ b(&result_ok);
   3483     __ bind(&result_in_receiver);
   3484     __ mov(result, receiver);
   3485     __ bind(&result_ok);
   3486   }
   3487 }
   3488 
   3489 
   3490 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3491   Register receiver = ToRegister(instr->receiver());
   3492   Register function = ToRegister(instr->function());
   3493   Register length = ToRegister(instr->length());
   3494   Register elements = ToRegister(instr->elements());
   3495   Register scratch = scratch0();
   3496   ASSERT(receiver.is(r0));  // Used for parameter count.
   3497   ASSERT(function.is(r1));  // Required by InvokeFunction.
   3498   ASSERT(ToRegister(instr->result()).is(r0));
   3499 
   3500   // Copy the arguments to this function possibly from the
   3501   // adaptor frame below it.
   3502   const uint32_t kArgumentsLimit = 1 * KB;
   3503   __ cmp(length, Operand(kArgumentsLimit));
   3504   DeoptimizeIf(hi, instr->environment());
   3505 
   3506   // Push the receiver and use the register to keep the original
   3507   // number of arguments.
   3508   __ push(receiver);
   3509   __ mov(receiver, length);
   3510   // The arguments start at a one-pointer-size offset from elements.
   3511   __ add(elements, elements, Operand(1 * kPointerSize));
   3512 
   3513   // Loop through the arguments pushing them onto the execution
   3514   // stack.
   3515   Label invoke, loop;
   3516   // length is a small non-negative integer, due to the test above.
   3517   __ cmp(length, Operand::Zero());
   3518   __ b(eq, &invoke);
   3519   __ bind(&loop);
   3520   __ ldr(scratch, MemOperand(elements, length, LSL, 2));
   3521   __ push(scratch);
   3522   __ sub(length, length, Operand(1), SetCC);
   3523   __ b(ne, &loop);
   3524 
   3525   __ bind(&invoke);
   3526   ASSERT(instr->HasPointerMap());
   3527   LPointerMap* pointers = instr->pointer_map();
   3528   SafepointGenerator safepoint_generator(
   3529       this, pointers, Safepoint::kLazyDeopt);
   3530   // The number of arguments is stored in receiver which is r0, as expected
   3531   // by InvokeFunction.
   3532   ParameterCount actual(receiver);
   3533   __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
   3534 }
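
        // A short note on the copy loop above: the arguments are pushed from
        // last to first as length counts down, and the SetCC form of the sub
        // doubles as the loop condition, so no separate compare is needed.
        // After the loop, receiver holds the original length and serves as
        // the actual ParameterCount for InvokeFunction.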
   3535 
   3536 
   3537 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3538   LOperand* argument = instr->value();
   3539   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3540     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3541   } else {
   3542     Register argument_reg = EmitLoadRegister(argument, ip);
   3543     __ push(argument_reg);
   3544   }
   3545 }
   3546 
   3547 
   3548 void LCodeGen::DoDrop(LDrop* instr) {
   3549   __ Drop(instr->count());
   3550 }
   3551 
   3552 
   3553 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3554   Register result = ToRegister(instr->result());
   3555   __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3556 }
   3557 
   3558 
   3559 void LCodeGen::DoContext(LContext* instr) {
   3560   // If there is a non-return use, the context must be moved to a register.
   3561   Register result = ToRegister(instr->result());
   3562   if (info()->IsOptimizing()) {
   3563     __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3564   } else {
   3565     // If there is no frame, the context must be in cp.
   3566     ASSERT(result.is(cp));
   3567   }
   3568 }
   3569 
   3570 
   3571 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3572   ASSERT(ToRegister(instr->context()).is(cp));
   3573   __ push(cp);  // The context is the first argument.
   3574   __ Move(scratch0(), instr->hydrogen()->pairs());
   3575   __ push(scratch0());
   3576   __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   3577   __ push(scratch0());
   3578   CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
   3579 }
   3580 
   3581 
   3582 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3583                                  int formal_parameter_count,
   3584                                  int arity,
   3585                                  LInstruction* instr,
   3586                                  R1State r1_state) {
   3587   bool dont_adapt_arguments =
   3588       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3589   bool can_invoke_directly =
   3590       dont_adapt_arguments || formal_parameter_count == arity;
   3591 
   3592   LPointerMap* pointers = instr->pointer_map();
   3593 
   3594   if (can_invoke_directly) {
   3595     if (r1_state == R1_UNINITIALIZED) {
   3596       __ Move(r1, function);
   3597     }
   3598 
   3599     // Change context.
   3600     __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   3601 
   3602     // Set r0 to arguments count if adaptation is not needed. Assumes that
   3603     // r0 is available to write to at this point.
   3604     if (dont_adapt_arguments) {
   3605       __ mov(r0, Operand(arity));
   3606     }
   3607 
   3608     // Invoke function.
   3609     __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   3610     __ Call(ip);
   3611 
   3612     // Set up deoptimization.
   3613     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3614   } else {
   3615     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3616     ParameterCount count(arity);
   3617     ParameterCount expected(formal_parameter_count);
   3618     __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
   3619   }
   3620 }
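
        // A worked example of the direct-call criterion above: a function
        // declared with 2 formal parameters and called with arity 2 is
        // invoked directly, as is any call whose target was compiled with
        // kDontAdaptArgumentsSentinel; calling the same two-parameter
        // function with arity 3 instead goes through InvokeFunction so the
        // arguments adaptor frame can reconcile the mismatch.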
   3621 
   3622 
   3623 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3624   ASSERT(instr->context() != NULL);
   3625   ASSERT(ToRegister(instr->context()).is(cp));
   3626   Register input = ToRegister(instr->value());
   3627   Register result = ToRegister(instr->result());
   3628   Register scratch = scratch0();
   3629 
   3630   // Deoptimize if not a heap number.
   3631   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3632   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   3633   __ cmp(scratch, Operand(ip));
   3634   DeoptimizeIf(ne, instr->environment());
   3635 
   3636   Label done;
   3637   Register exponent = scratch0();
   3638   scratch = no_reg;
   3639   __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3640   // Check the sign of the argument. If the argument is positive, just
   3641   // return it.
   3642   __ tst(exponent, Operand(HeapNumber::kSignMask));
   3643   // Move the input to the result if necessary.
   3644   __ Move(result, input);
   3645   __ b(eq, &done);
   3646 
   3647   // Input is negative. Reverse its sign.
   3648   // Preserve the value of all registers.
   3649   {
   3650     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   3651 
   3652     // Registers were saved at the safepoint, so we can use
   3653     // many scratch registers.
   3654     Register tmp1 = input.is(r1) ? r0 : r1;
   3655     Register tmp2 = input.is(r2) ? r0 : r2;
   3656     Register tmp3 = input.is(r3) ? r0 : r3;
   3657     Register tmp4 = input.is(r4) ? r0 : r4;
   3658 
   3659     // exponent: floating point exponent value.
   3660 
   3661     Label allocated, slow;
   3662     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3663     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3664     __ b(&allocated);
   3665 
   3666     // Slow case: Call the runtime system to do the number allocation.
   3667     __ bind(&slow);
   3668 
   3669     CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
   3670                             instr->context());
   3671     // Set the pointer to the new heap number in tmp.
   3672     if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
   3673     // Restore input_reg after call to runtime.
   3674     __ LoadFromSafepointRegisterSlot(input, input);
   3675     __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3676 
   3677     __ bind(&allocated);
   3678     // exponent: floating point exponent value.
   3679     // tmp1: allocated heap number.
   3680     __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
   3681     __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3682     __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3683     __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3684 
   3685     __ StoreToSafepointRegisterSlot(tmp1, result);
   3686   }
   3687 
   3688   __ bind(&done);
   3689 }
   3690 
   3691 
   3692 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3693   Register input = ToRegister(instr->value());
   3694   Register result = ToRegister(instr->result());
   3695   __ cmp(input, Operand::Zero());
   3696   __ Move(result, input, pl);
   3697   // We can make rsb conditional because the previous cmp instruction
   3698   // will clear the V (overflow) flag and rsb won't set this flag
   3699   // if input is positive.
   3700   __ rsb(result, input, Operand::Zero(), SetCC, mi);
   3701   // Deoptimize on overflow.
   3702   DeoptimizeIf(vs, instr->environment());
   3703 }
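
        // A minimal trace of the branchless abs above: for input == -7 the
        // cmp sets N, the pl move is skipped, and the mi rsb computes
        // 0 - (-7) = 7. The only overflow case is kMinInt: 0 - (-2^31) does
        // not fit in 32 bits, rsb sets V, and the vs deopt fires.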
   3704 
   3705 
   3706 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3707   // Class for deferred case.
   3708   class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   3709    public:
   3710     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3711         : LDeferredCode(codegen), instr_(instr) { }
   3712     virtual void Generate() V8_OVERRIDE {
   3713       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3714     }
   3715     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   3716    private:
   3717     LMathAbs* instr_;
   3718   };
   3719 
   3720   Representation r = instr->hydrogen()->value()->representation();
   3721   if (r.IsDouble()) {
   3722     DwVfpRegister input = ToDoubleRegister(instr->value());
   3723     DwVfpRegister result = ToDoubleRegister(instr->result());
   3724     __ vabs(result, input);
   3725   } else if (r.IsSmiOrInteger32()) {
   3726     EmitIntegerMathAbs(instr);
   3727   } else {
   3728     // Representation is tagged.
   3729     DeferredMathAbsTaggedHeapNumber* deferred =
   3730         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3731     Register input = ToRegister(instr->value());
   3732     // Smi check.
   3733     __ JumpIfNotSmi(input, deferred->entry());
   3734     // If smi, handle it directly.
   3735     EmitIntegerMathAbs(instr);
   3736     __ bind(deferred->exit());
   3737   }
   3738 }
   3739 
   3740 
   3741 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3742   DwVfpRegister input = ToDoubleRegister(instr->value());
   3743   Register result = ToRegister(instr->result());
   3744   Register input_high = scratch0();
   3745   Label done, exact;
   3746 
   3747   __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
   3748   DeoptimizeIf(al, instr->environment());
   3749 
   3750   __ bind(&exact);
   3751   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3752     // Test for -0.
   3753     __ cmp(result, Operand::Zero());
   3754     __ b(ne, &done);
   3755     __ cmp(input_high, Operand::Zero());
   3756     DeoptimizeIf(mi, instr->environment());
   3757   }
   3758   __ bind(&done);
   3759 }
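
        // A note on the -0 test above: an exactly-floored result of 0 can
        // only come from an input of +0 or -0, so when the sign bit of the
        // input's upper word is set (input_high negative) the input was -0
        // and kBailoutOnMinusZero forces a deopt instead of returning 0.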
   3760 
   3761 
   3762 void LCodeGen::DoMathRound(LMathRound* instr) {
   3763   DwVfpRegister input = ToDoubleRegister(instr->value());
   3764   Register result = ToRegister(instr->result());
   3765   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3766   DwVfpRegister input_plus_dot_five = double_scratch1;
   3767   Register input_high = scratch0();
   3768   DwVfpRegister dot_five = double_scratch0();
   3769   Label convert, done;
   3770 
   3771   __ Vmov(dot_five, 0.5, scratch0());
   3772   __ vabs(double_scratch1, input);
   3773   __ VFPCompareAndSetFlags(double_scratch1, dot_five);
   3774   // If input is in [-0.5, -0], the result is -0.
   3775   // If input is in [+0, +0.5[, the result is +0.
   3776   // If the input is +0.5, the result is 1.
   3777   __ b(hi, &convert);  // Out of [-0.5, +0.5].
   3778   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3779     __ VmovHigh(input_high, input);
   3780     __ cmp(input_high, Operand::Zero());
   3781     DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
   3782   }
   3783   __ VFPCompareAndSetFlags(input, dot_five);
   3784   __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
   3785   // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
   3786   // flag kBailoutOnMinusZero.
   3787   __ mov(result, Operand::Zero(), LeaveCC, ne);
   3788   __ b(&done);
   3789 
   3790   __ bind(&convert);
   3791   __ vadd(input_plus_dot_five, input, dot_five);
   3792   // Reuse dot_five (double_scratch0) as we no longer need this value.
   3793   __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
   3794                    &done, &done);
   3795   DeoptimizeIf(al, instr->environment());
   3796   __ bind(&done);
   3797 }
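
        // A worked example of the rounding above: round(2.3) takes the
        // convert path since |2.3| > 0.5, and floor(2.3 + 0.5) = 2; ties
        // round toward +infinity, e.g. floor(2.5 + 0.5) = 3. Inputs with
        // |input| <= 0.5 never reach the conversion and are resolved by the
        // flag-setting compares alone.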
   3798 
   3799 
   3800 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3801   DwVfpRegister input = ToDoubleRegister(instr->value());
   3802   DwVfpRegister result = ToDoubleRegister(instr->result());
   3803   __ vsqrt(result, input);
   3804 }
   3805 
   3806 
   3807 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3808   DwVfpRegister input = ToDoubleRegister(instr->value());
   3809   DwVfpRegister result = ToDoubleRegister(instr->result());
   3810   DwVfpRegister temp = double_scratch0();
   3811 
   3812   // Note that according to ECMA-262 15.8.2.13:
   3813   // Math.pow(-Infinity, 0.5) == Infinity
   3814   // Math.sqrt(-Infinity) == NaN
   3815   Label done;
   3816   __ vmov(temp, -V8_INFINITY, scratch0());
   3817   __ VFPCompareAndSetFlags(input, temp);
   3818   __ vneg(result, temp, eq);
   3819   __ b(&done, eq);
   3820 
   3821   // Add +0 to convert -0 to +0.
   3822   __ vadd(result, input, kDoubleRegZero);
   3823   __ vsqrt(result, result);
   3824   __ bind(&done);
   3825 }
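
        // A note on the vadd above: IEEE 754 addition gives -0 + +0 == +0,
        // so adding kDoubleRegZero canonicalizes a -0 input before the
        // square root (vsqrt(-0) would return -0, while Math.pow(-0, 0.5)
        // must be +0). The -Infinity case is peeled off first because vsqrt
        // would produce NaN instead of the required +Infinity.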
   3826 
   3827 
   3828 void LCodeGen::DoPower(LPower* instr) {
   3829   Representation exponent_type = instr->hydrogen()->right()->representation();
   3830   // Having marked this as a call, we can use any registers.
   3831   // Just make sure that the input/output registers are the expected ones.
   3832   ASSERT(!instr->right()->IsDoubleRegister() ||
   3833          ToDoubleRegister(instr->right()).is(d1));
   3834   ASSERT(!instr->right()->IsRegister() ||
   3835          ToRegister(instr->right()).is(r2));
   3836   ASSERT(ToDoubleRegister(instr->left()).is(d0));
   3837   ASSERT(ToDoubleRegister(instr->result()).is(d2));
   3838 
   3839   if (exponent_type.IsSmi()) {
   3840     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3841     __ CallStub(&stub);
   3842   } else if (exponent_type.IsTagged()) {
   3843     Label no_deopt;
   3844     __ JumpIfSmi(r2, &no_deopt);
   3845     __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
   3846     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   3847     __ cmp(r6, Operand(ip));
   3848     DeoptimizeIf(ne, instr->environment());
   3849     __ bind(&no_deopt);
   3850     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3851     __ CallStub(&stub);
   3852   } else if (exponent_type.IsInteger32()) {
   3853     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3854     __ CallStub(&stub);
   3855   } else {
   3856     ASSERT(exponent_type.IsDouble());
   3857     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3858     __ CallStub(&stub);
   3859   }
   3860 }
   3861 
   3862 
   3863 void LCodeGen::DoMathExp(LMathExp* instr) {
   3864   DwVfpRegister input = ToDoubleRegister(instr->value());
   3865   DwVfpRegister result = ToDoubleRegister(instr->result());
   3866   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
   3867   DwVfpRegister double_scratch2 = double_scratch0();
   3868   Register temp1 = ToRegister(instr->temp1());
   3869   Register temp2 = ToRegister(instr->temp2());
   3870 
   3871   MathExpGenerator::EmitMathExp(
   3872       masm(), input, result, double_scratch1, double_scratch2,
   3873       temp1, temp2, scratch0());
   3874 }
   3875 
   3876 
   3877 void LCodeGen::DoMathLog(LMathLog* instr) {
   3878   __ PrepareCallCFunction(0, 1, scratch0());
   3879   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3880   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
   3881                    0, 1);
   3882   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3883 }
   3884 
   3885 
   3886 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3887   Register input = ToRegister(instr->value());
   3888   Register result = ToRegister(instr->result());
   3889   __ clz(result, input);
   3890 }
   3891 
   3892 
   3893 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3894   ASSERT(ToRegister(instr->context()).is(cp));
   3895   ASSERT(ToRegister(instr->function()).is(r1));
   3896   ASSERT(instr->HasPointerMap());
   3897 
   3898   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3899   if (known_function.is_null()) {
   3900     LPointerMap* pointers = instr->pointer_map();
   3901     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3902     ParameterCount count(instr->arity());
   3903     __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
   3904   } else {
   3905     CallKnownFunction(known_function,
   3906                       instr->hydrogen()->formal_parameter_count(),
   3907                       instr->arity(),
   3908                       instr,
   3909                       R1_CONTAINS_TARGET);
   3910   }
   3911 }
   3912 
   3913 
   3914 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3915   ASSERT(ToRegister(instr->result()).is(r0));
   3916 
   3917   LPointerMap* pointers = instr->pointer_map();
   3918   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3919 
   3920   if (instr->target()->IsConstantOperand()) {
   3921     LConstantOperand* target = LConstantOperand::cast(instr->target());
   3922     Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3923     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3924     PlatformCallInterfaceDescriptor* call_descriptor =
   3925         instr->descriptor()->platform_specific_descriptor();
   3926     __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
   3927             call_descriptor->storage_mode());
   3928   } else {
   3929     ASSERT(instr->target()->IsRegister());
   3930     Register target = ToRegister(instr->target());
   3931     generator.BeforeCall(__ CallSize(target));
   3932     __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3933     __ Call(target);
   3934   }
   3935   generator.AfterCall();
   3936 }
   3937 
   3938 
   3939 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   3940   ASSERT(ToRegister(instr->function()).is(r1));
   3941   ASSERT(ToRegister(instr->result()).is(r0));
   3942 
   3943   if (instr->hydrogen()->pass_argument_count()) {
   3944     __ mov(r0, Operand(instr->arity()));
   3945   }
   3946 
   3947   // Change context.
   3948   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   3949 
   3950   // Load the code entry address.
   3951   __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   3952   __ Call(ip);
   3953 
   3954   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3955 }
   3956 
   3957 
   3958 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3959   ASSERT(ToRegister(instr->context()).is(cp));
   3960   ASSERT(ToRegister(instr->function()).is(r1));
   3961   ASSERT(ToRegister(instr->result()).is(r0));
   3962 
   3963   int arity = instr->arity();
   3964   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
   3965   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3966 }
   3967 
   3968 
   3969 void LCodeGen::DoCallNew(LCallNew* instr) {
   3970   ASSERT(ToRegister(instr->context()).is(cp));
   3971   ASSERT(ToRegister(instr->constructor()).is(r1));
   3972   ASSERT(ToRegister(instr->result()).is(r0));
   3973 
   3974   __ mov(r0, Operand(instr->arity()));
   3975   // No cell in r2 for construct type feedback in optimized code
   3976   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
   3977   CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
   3978   CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   3979 }
   3980 
   3981 
   3982 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3983   ASSERT(ToRegister(instr->context()).is(cp));
   3984   ASSERT(ToRegister(instr->constructor()).is(r1));
   3985   ASSERT(ToRegister(instr->result()).is(r0));
   3986 
   3987   __ mov(r0, Operand(instr->arity()));
   3988   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
   3989   ElementsKind kind = instr->hydrogen()->elements_kind();
   3990   AllocationSiteOverrideMode override_mode =
   3991       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3992           ? DISABLE_ALLOCATION_SITES
   3993           : DONT_OVERRIDE;
   3994 
   3995   if (instr->arity() == 0) {
   3996     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3997     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   3998   } else if (instr->arity() == 1) {
   3999     Label done;
   4000     if (IsFastPackedElementsKind(kind)) {
   4001       Label packed_case;
   4002       // We might need the holey variant of the stub;
   4003       // look at the first argument.
   4004       __ ldr(r5, MemOperand(sp, 0));
   4005       __ cmp(r5, Operand::Zero());
   4006       __ b(eq, &packed_case);
   4007 
   4008       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   4009       ArraySingleArgumentConstructorStub stub(isolate(),
   4010                                               holey_kind,
   4011                                               override_mode);
   4012       CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4013       __ jmp(&done);
   4014       __ bind(&packed_case);
   4015     }
   4016 
   4017     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   4018     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4019     __ bind(&done);
   4020   } else {
   4021     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
   4022     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   4023   }
   4024 }
   4025 
   4026 
   4027 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   4028   CallRuntime(instr->function(), instr->arity(), instr);
   4029 }
   4030 
   4031 
   4032 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4033   Register function = ToRegister(instr->function());
   4034   Register code_object = ToRegister(instr->code_object());
   4035   __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
   4036   __ str(code_object,
   4037          FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   4038 }
   4039 
   4040 
   4041 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   4042   Register result = ToRegister(instr->result());
   4043   Register base = ToRegister(instr->base_object());
   4044   if (instr->offset()->IsConstantOperand()) {
   4045     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   4046     __ add(result, base, Operand(ToInteger32(offset)));
   4047   } else {
   4048     Register offset = ToRegister(instr->offset());
   4049     __ add(result, base, offset);
   4050   }
   4051 }
   4052 
   4053 
   4054 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4055   Representation representation = instr->representation();
   4056 
   4057   Register object = ToRegister(instr->object());
   4058   Register scratch = scratch0();
   4059   HObjectAccess access = instr->hydrogen()->access();
   4060   int offset = access.offset();
   4061 
   4062   if (access.IsExternalMemory()) {
   4063     Register value = ToRegister(instr->value());
   4064     MemOperand operand = MemOperand(object, offset);
   4065     __ Store(value, operand, representation);
   4066     return;
   4067   }
   4068 
   4069   __ AssertNotSmi(object);
   4070 
   4071   ASSERT(!representation.IsSmi() ||
   4072          !instr->value()->IsConstantOperand() ||
   4073          IsSmi(LConstantOperand::cast(instr->value())));
   4074   if (representation.IsDouble()) {
   4075     ASSERT(access.IsInobject());
   4076     ASSERT(!instr->hydrogen()->has_transition());
   4077     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4078     DwVfpRegister value = ToDoubleRegister(instr->value());
   4079     __ vstr(value, FieldMemOperand(object, offset));
   4080     return;
   4081   }
   4082 
   4083   if (instr->hydrogen()->has_transition()) {
   4084     Handle<Map> transition = instr->hydrogen()->transition_map();
   4085     AddDeprecationDependency(transition);
   4086     __ mov(scratch, Operand(transition));
   4087     __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   4088     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   4089       Register temp = ToRegister(instr->temp());
   4090       // Update the write barrier for the map field.
   4091       __ RecordWriteForMap(object,
   4092                            scratch,
   4093                            temp,
   4094                            GetLinkRegisterState(),
   4095                            kSaveFPRegs);
   4096     }
   4097   }
   4098 
   4099   // Do the store.
   4100   Register value = ToRegister(instr->value());
   4101   if (access.IsInobject()) {
   4102     MemOperand operand = FieldMemOperand(object, offset);
   4103     __ Store(value, operand, representation);
   4104     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4105       // Update the write barrier for the object for in-object properties.
   4106       __ RecordWriteField(object,
   4107                           offset,
   4108                           value,
   4109                           scratch,
   4110                           GetLinkRegisterState(),
   4111                           kSaveFPRegs,
   4112                           EMIT_REMEMBERED_SET,
   4113                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   4114                           instr->hydrogen()->PointersToHereCheckForValue());
   4115     }
   4116   } else {
   4117     __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   4118     MemOperand operand = FieldMemOperand(scratch, offset);
   4119     __ Store(value, operand, representation);
   4120     if (instr->hydrogen()->NeedsWriteBarrier()) {
   4121       // Update the write barrier for the properties array.
   4122       // object is used as a scratch register.
   4123       __ RecordWriteField(scratch,
   4124                           offset,
   4125                           value,
   4126                           object,
   4127                           GetLinkRegisterState(),
   4128                           kSaveFPRegs,
   4129                           EMIT_REMEMBERED_SET,
   4130                           instr->hydrogen()->SmiCheckForWriteBarrier(),
   4131                           instr->hydrogen()->PointersToHereCheckForValue());
   4132     }
   4133   }
   4134 }
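// A rough sketch of the two store paths above (the usual JSObject layout):
// in-object fields live directly inside the object, out-of-object fields
// live in the properties backing store. FieldMemOperand folds in the
// -kHeapObjectTag adjustment:
//
//   in-object:      str value, [object + offset - kHeapObjectTag]
//   out-of-object:  ldr props, [object + kPropertiesOffset - kHeapObjectTag]
//                   str value, [props + offset - kHeapObjectTag]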
   4135 
   4136 
   4137 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4138   ASSERT(ToRegister(instr->context()).is(cp));
   4139   ASSERT(ToRegister(instr->object()).is(r1));
   4140   ASSERT(ToRegister(instr->value()).is(r0));
   4141 
   4142   // Name is always in r2.
   4143   __ mov(r2, Operand(instr->name()));
   4144   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   4145   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   4146 }
   4147 
   4148 
   4149 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   4150   Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
   4151   if (instr->index()->IsConstantOperand()) {
   4152     Operand index = ToOperand(instr->index());
   4153     Register length = ToRegister(instr->length());
   4154     __ cmp(length, index);
   4155     cc = CommuteCondition(cc);
   4156   } else {
   4157     Register index = ToRegister(instr->index());
   4158     Operand length = ToOperand(instr->length());
   4159     __ cmp(index, length);
   4160   }
   4161   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   4162     Label done;
   4163     __ b(NegateCondition(cc), &done);
   4164     __ stop("eliminated bounds check failed");
   4165     __ bind(&done);
   4166   } else {
   4167     DeoptimizeIf(cc, instr->environment());
   4168   }
   4169 }
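// Worked example of the commuted condition above (all compares are
// unsigned): without allow_equality() the deopt condition is
// `index hs length` (index >= length). With a constant index the operands
// are emitted as `cmp length, index`, so CommuteCondition turns hs into
// ls, and `length ls index` tests the same predicate.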
   4170 
   4171 
   4172 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4173   Register external_pointer = ToRegister(instr->elements());
   4174   Register key = no_reg;
   4175   ElementsKind elements_kind = instr->elements_kind();
   4176   bool key_is_constant = instr->key()->IsConstantOperand();
   4177   int constant_key = 0;
   4178   if (key_is_constant) {
   4179     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4180     if (constant_key & 0xF0000000) {
   4181       Abort(kArrayIndexConstantValueTooBig);
   4182     }
   4183   } else {
   4184     key = ToRegister(instr->key());
   4185   }
   4186   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   4187   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4188       ? (element_size_shift - kSmiTagSize) : element_size_shift;
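          // Scaling sketch for smi keys (kSmiTagSize == 1 on 32-bit ARM): a
          // tagged key holds value << 1, so shifting it by
          // element_size_shift - 1 yields value << element_size_shift. E.g.
          // for FLOAT64 elements (element_size_shift == 3) a smi key is
          // applied with LSL #2.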
   4189   int base_offset = instr->base_offset();
   4190 
   4191   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   4192       elements_kind == FLOAT32_ELEMENTS ||
   4193       elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
   4194       elements_kind == FLOAT64_ELEMENTS) {
   4195     Register address = scratch0();
   4196     DwVfpRegister value(ToDoubleRegister(instr->value()));
   4197     if (key_is_constant) {
   4198       if (constant_key != 0) {
   4199         __ add(address, external_pointer,
   4200                Operand(constant_key << element_size_shift));
   4201       } else {
   4202         address = external_pointer;
   4203       }
   4204     } else {
   4205       __ add(address, external_pointer, Operand(key, LSL, shift_size));
   4206     }
   4207     if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
   4208         elements_kind == FLOAT32_ELEMENTS) {
   4209       __ vcvt_f32_f64(double_scratch0().low(), value);
   4210       __ vstr(double_scratch0().low(), address, base_offset);
   4211     } else {  // Storing doubles, not floats.
   4212       __ vstr(value, address, base_offset);
   4213     }
   4214   } else {
   4215     Register value(ToRegister(instr->value()));
   4216     MemOperand mem_operand = PrepareKeyedOperand(
   4217         key, external_pointer, key_is_constant, constant_key,
   4218         element_size_shift, shift_size,
   4219         base_offset);
   4220     switch (elements_kind) {
   4221       case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
   4222       case EXTERNAL_INT8_ELEMENTS:
   4223       case EXTERNAL_UINT8_ELEMENTS:
   4224       case UINT8_ELEMENTS:
   4225       case UINT8_CLAMPED_ELEMENTS:
   4226       case INT8_ELEMENTS:
   4227         __ strb(value, mem_operand);
   4228         break;
   4229       case EXTERNAL_INT16_ELEMENTS:
   4230       case EXTERNAL_UINT16_ELEMENTS:
   4231       case INT16_ELEMENTS:
   4232       case UINT16_ELEMENTS:
   4233         __ strh(value, mem_operand);
   4234         break;
   4235       case EXTERNAL_INT32_ELEMENTS:
   4236       case EXTERNAL_UINT32_ELEMENTS:
   4237       case INT32_ELEMENTS:
   4238       case UINT32_ELEMENTS:
   4239         __ str(value, mem_operand);
   4240         break;
   4241       case FLOAT32_ELEMENTS:
   4242       case FLOAT64_ELEMENTS:
   4243       case EXTERNAL_FLOAT32_ELEMENTS:
   4244       case EXTERNAL_FLOAT64_ELEMENTS:
   4245       case FAST_DOUBLE_ELEMENTS:
   4246       case FAST_ELEMENTS:
   4247       case FAST_SMI_ELEMENTS:
   4248       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4249       case FAST_HOLEY_ELEMENTS:
   4250       case FAST_HOLEY_SMI_ELEMENTS:
   4251       case DICTIONARY_ELEMENTS:
   4252       case SLOPPY_ARGUMENTS_ELEMENTS:
   4253         UNREACHABLE();
   4254         break;
   4255     }
   4256   }
   4257 }
   4258 
   4259 
   4260 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4261   DwVfpRegister value = ToDoubleRegister(instr->value());
   4262   Register elements = ToRegister(instr->elements());
   4263   Register scratch = scratch0();
   4264   DwVfpRegister double_scratch = double_scratch0();
   4265   bool key_is_constant = instr->key()->IsConstantOperand();
   4266   int base_offset = instr->base_offset();
   4267 
   4268   // Calculate the effective address of the slot in the array to store the
   4269   // double value.
   4270   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4271   if (key_is_constant) {
   4272     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4273     if (constant_key & 0xF0000000) {
   4274       Abort(kArrayIndexConstantValueTooBig);
   4275     }
   4276     __ add(scratch, elements,
   4277            Operand((constant_key << element_size_shift) + base_offset));
   4278   } else {
   4279     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
   4280         ? (element_size_shift - kSmiTagSize) : element_size_shift;
   4281     __ add(scratch, elements, Operand(base_offset));
   4282     __ add(scratch, scratch,
   4283            Operand(ToRegister(instr->key()), LSL, shift_size));
   4284   }
   4285 
   4286   if (instr->NeedsCanonicalization()) {
   4287     // Force a canonical NaN.
   4288     if (masm()->emit_debug_code()) {
   4289       __ vmrs(ip);
   4290       __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
   4291       __ Assert(ne, kDefaultNaNModeNotSet);
   4292     }
   4293     __ VFPCanonicalizeNaN(double_scratch, value);
   4294     __ vstr(double_scratch, scratch, 0);
   4295   } else {
   4296     __ vstr(value, scratch, 0);
   4297   }
   4298 }
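// Canonicalization sketch (an assumption about the helper): with the VFP
// default-NaN mode enabled -- which the debug check above asserts -- an
// arithmetic identity such as `vsub value, value, 0.0` replaces any NaN
// payload with the single canonical quiet NaN before the store, so boxed
// doubles never carry signalling payloads.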
   4299 
   4300 
   4301 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4302   Register value = ToRegister(instr->value());
   4303   Register elements = ToRegister(instr->elements());
   4304   Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
   4305       : no_reg;
   4306   Register scratch = scratch0();
   4307   Register store_base = scratch;
   4308   int offset = instr->base_offset();
   4309 
   4310   // Do the store.
   4311   if (instr->key()->IsConstantOperand()) {
   4312     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4313     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4314     offset += ToInteger32(const_operand) * kPointerSize;
   4315     store_base = elements;
   4316   } else {
   4317     // Even though the HStoreKeyed instruction forces the input
   4318     // representation for the key to be an integer, the input gets replaced
   4319     // during bounds check elimination with the index argument to the bounds
   4320     // check, which can be tagged, so that case must be handled here, too.
   4321     if (instr->hydrogen()->key()->representation().IsSmi()) {
   4322       __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
   4323     } else {
   4324       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
   4325     }
   4326   }
   4327   __ str(value, MemOperand(store_base, offset));
   4328 
   4329   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4330     SmiCheck check_needed =
   4331         instr->hydrogen()->value()->type().IsHeapObject()
   4332             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4333     // Compute address of modified element and store it into key register.
   4334     __ add(key, store_base, Operand(offset));
   4335     __ RecordWrite(elements,
   4336                    key,
   4337                    value,
   4338                    GetLinkRegisterState(),
   4339                    kSaveFPRegs,
   4340                    EMIT_REMEMBERED_SET,
   4341                    check_needed,
   4342                    instr->hydrogen()->PointersToHereCheckForValue());
   4343   }
   4344 }
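// Barrier sketch: RecordWrite(elements, key, value, ...) records that the
// slot whose address was just computed into `key` now holds `value`, so
// the incremental marker and the store buffer see the new edge; the smi
// check is skipped when the value is statically known to be a heap object.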
   4345 
   4346 
   4347 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4348   // By cases: typed/external elements, fast double elements, fast elements.
   4349   if (instr->is_typed_elements()) {
   4350     DoStoreKeyedExternalArray(instr);
   4351   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4352     DoStoreKeyedFixedDoubleArray(instr);
   4353   } else {
   4354     DoStoreKeyedFixedArray(instr);
   4355   }
   4356 }
   4357 
   4358 
   4359 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4360   ASSERT(ToRegister(instr->context()).is(cp));
   4361   ASSERT(ToRegister(instr->object()).is(r2));
   4362   ASSERT(ToRegister(instr->key()).is(r1));
   4363   ASSERT(ToRegister(instr->value()).is(r0));
   4364 
   4365   Handle<Code> ic = instr->strict_mode() == STRICT
   4366       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
   4367       : isolate()->builtins()->KeyedStoreIC_Initialize();
   4368   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
   4369 }
   4370 
   4371 
   4372 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4373   Register object_reg = ToRegister(instr->object());
   4374   Register scratch = scratch0();
   4375 
   4376   Handle<Map> from_map = instr->original_map();
   4377   Handle<Map> to_map = instr->transitioned_map();
   4378   ElementsKind from_kind = instr->from_kind();
   4379   ElementsKind to_kind = instr->to_kind();
   4380 
   4381   Label not_applicable;
   4382   __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4383   __ cmp(scratch, Operand(from_map));
   4384   __ b(ne, &not_applicable);
   4385 
   4386   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4387     Register new_map_reg = ToRegister(instr->new_map_temp());
   4388     __ mov(new_map_reg, Operand(to_map));
   4389     __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4390     // Write barrier.
   4391     __ RecordWriteForMap(object_reg,
   4392                          new_map_reg,
   4393                          scratch,
   4394                          GetLinkRegisterState(),
   4395                          kDontSaveFPRegs);
   4396   } else {
   4397     ASSERT(ToRegister(instr->context()).is(cp));
   4398     ASSERT(object_reg.is(r0));
   4399     PushSafepointRegistersScope scope(
   4400         this, Safepoint::kWithRegistersAndDoubles);
   4401     __ Move(r1, to_map);
   4402     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
   4403     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
   4404     __ CallStub(&stub);
   4405     RecordSafepointWithRegistersAndDoubles(
   4406         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   4407   }
   4408   __ bind(&not_applicable);
   4409 }
   4410 
   4411 
   4412 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4413   Register object = ToRegister(instr->object());
   4414   Register temp = ToRegister(instr->temp());
   4415   Label no_memento_found;
   4416   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   4417   DeoptimizeIf(eq, instr->environment());
   4418   __ bind(&no_memento_found);
   4419 }
   4420 
   4421 
   4422 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4423   ASSERT(ToRegister(instr->context()).is(cp));
   4424   ASSERT(ToRegister(instr->left()).is(r1));
   4425   ASSERT(ToRegister(instr->right()).is(r0));
   4426   StringAddStub stub(isolate(),
   4427                      instr->hydrogen()->flags(),
   4428                      instr->hydrogen()->pretenure_flag());
   4429   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4430 }
   4431 
   4432 
   4433 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4434   class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   4435    public:
   4436     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4437         : LDeferredCode(codegen), instr_(instr) { }
   4438     virtual void Generate() V8_OVERRIDE {
   4439       codegen()->DoDeferredStringCharCodeAt(instr_);
   4440     }
   4441     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4442    private:
   4443     LStringCharCodeAt* instr_;
   4444   };
   4445 
   4446   DeferredStringCharCodeAt* deferred =
   4447       new(zone()) DeferredStringCharCodeAt(this, instr);
   4448 
   4449   StringCharLoadGenerator::Generate(masm(),
   4450                                     ToRegister(instr->string()),
   4451                                     ToRegister(instr->index()),
   4452                                     ToRegister(instr->result()),
   4453                                     deferred->entry());
   4454   __ bind(deferred->exit());
   4455 }
   4456 
   4457 
   4458 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4459   Register string = ToRegister(instr->string());
   4460   Register result = ToRegister(instr->result());
   4461   Register scratch = scratch0();
   4462 
   4463   // TODO(3095996): Get rid of this. For now, we need to make the
   4464   // result register contain a valid pointer because it is already
   4465   // contained in the register pointer map.
   4466   __ mov(result, Operand::Zero());
   4467 
   4468   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4469   __ push(string);
   4470   // Push the index as a smi. This is safe because of the checks in
   4471   // DoStringCharCodeAt above.
   4472   if (instr->index()->IsConstantOperand()) {
   4473     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4474     __ mov(scratch, Operand(Smi::FromInt(const_index)));
   4475     __ push(scratch);
   4476   } else {
   4477     Register index = ToRegister(instr->index());
   4478     __ SmiTag(index);
   4479     __ push(index);
   4480   }
   4481   CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
   4482                           instr->context());
   4483   __ AssertSmi(r0);
   4484   __ SmiUntag(r0);
   4485   __ StoreToSafepointRegisterSlot(r0, result);
   4486 }
   4487 
   4488 
   4489 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4490   class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   4491    public:
   4492     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4493         : LDeferredCode(codegen), instr_(instr) { }
   4494     virtual void Generate() V8_OVERRIDE {
   4495       codegen()->DoDeferredStringCharFromCode(instr_);
   4496     }
   4497     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4498    private:
   4499     LStringCharFromCode* instr_;
   4500   };
   4501 
   4502   DeferredStringCharFromCode* deferred =
   4503       new(zone()) DeferredStringCharFromCode(this, instr);
   4504 
   4505   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   4506   Register char_code = ToRegister(instr->char_code());
   4507   Register result = ToRegister(instr->result());
   4508   ASSERT(!char_code.is(result));
   4509 
   4510   __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
   4511   __ b(hi, deferred->entry());
   4512   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4513   __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
   4514   __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4515   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4516   __ cmp(result, ip);
   4517   __ b(eq, deferred->entry());
   4518   __ bind(deferred->exit());
   4519 }
   4520 
   4521 
   4522 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4523   Register char_code = ToRegister(instr->char_code());
   4524   Register result = ToRegister(instr->result());
   4525 
   4526   // TODO(3095996): Get rid of this. For now, we need to make the
   4527   // result register contain a valid pointer because it is already
   4528   // contained in the register pointer map.
   4529   __ mov(result, Operand::Zero());
   4530 
   4531   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4532   __ SmiTag(char_code);
   4533   __ push(char_code);
   4534   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   4535   __ StoreToSafepointRegisterSlot(r0, result);
   4536 }
   4537 
   4538 
   4539 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4540   LOperand* input = instr->value();
   4541   ASSERT(input->IsRegister() || input->IsStackSlot());
   4542   LOperand* output = instr->result();
   4543   ASSERT(output->IsDoubleRegister());
   4544   SwVfpRegister single_scratch = double_scratch0().low();
   4545   if (input->IsStackSlot()) {
   4546     Register scratch = scratch0();
   4547     __ ldr(scratch, ToMemOperand(input));
   4548     __ vmov(single_scratch, scratch);
   4549   } else {
   4550     __ vmov(single_scratch, ToRegister(input));
   4551   }
   4552   __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
   4553 }
   4554 
   4555 
   4556 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4557   LOperand* input = instr->value();
   4558   LOperand* output = instr->result();
   4559 
   4560   SwVfpRegister flt_scratch = double_scratch0().low();
   4561   __ vmov(flt_scratch, ToRegister(input));
   4562   __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
   4563 }
   4564 
   4565 
   4566 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4567   class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   4568    public:
   4569     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4570         : LDeferredCode(codegen), instr_(instr) { }
   4571     virtual void Generate() V8_OVERRIDE {
   4572       codegen()->DoDeferredNumberTagIU(instr_,
   4573                                        instr_->value(),
   4574                                        instr_->temp1(),
   4575                                        instr_->temp2(),
   4576                                        SIGNED_INT32);
   4577     }
   4578     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4579    private:
   4580     LNumberTagI* instr_;
   4581   };
   4582 
   4583   Register src = ToRegister(instr->value());
   4584   Register dst = ToRegister(instr->result());
   4585 
   4586   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
   4587   __ SmiTag(dst, src, SetCC);
   4588   __ b(vs, deferred->entry());
   4589   __ bind(deferred->exit());
   4590 }
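// Tagging sketch (kSmiTagSize == 1, so a rough expansion of SmiTag):
//
//   adds dst, src, src   ; dst = src << 1, V set on signed overflow
//
// The V flag fires exactly when src needs more than 31 bits, e.g.
// src = 0x40000000, which is why overflow branches to the deferred code.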
   4591 
   4592 
   4593 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4594   class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   4595    public:
   4596     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4597         : LDeferredCode(codegen), instr_(instr) { }
   4598     virtual void Generate() V8_OVERRIDE {
   4599       codegen()->DoDeferredNumberTagIU(instr_,
   4600                                        instr_->value(),
   4601                                        instr_->temp1(),
   4602                                        instr_->temp2(),
   4603                                        UNSIGNED_INT32);
   4604     }
   4605     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4606    private:
   4607     LNumberTagU* instr_;
   4608   };
   4609 
   4610   Register input = ToRegister(instr->value());
   4611   Register result = ToRegister(instr->result());
   4612 
   4613   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4614   __ cmp(input, Operand(Smi::kMaxValue));
   4615   __ b(hi, deferred->entry());
   4616   __ SmiTag(result, input);
   4617   __ bind(deferred->exit());
   4618 }
   4619 
   4620 
   4621 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
   4622                                      LOperand* value,
   4623                                      LOperand* temp1,
   4624                                      LOperand* temp2,
   4625                                      IntegerSignedness signedness) {
   4626   Label done, slow;
   4627   Register src = ToRegister(value);
   4628   Register dst = ToRegister(instr->result());
   4629   Register tmp1 = scratch0();
   4630   Register tmp2 = ToRegister(temp1);
   4631   Register tmp3 = ToRegister(temp2);
   4632   LowDwVfpRegister dbl_scratch = double_scratch0();
   4633 
   4634   if (signedness == SIGNED_INT32) {
   4635     // There was overflow, so bits 30 and 31 of the original integer
   4636     // disagree. Try to allocate a heap number in new space and store
   4637     // the value in there. If that fails, call the runtime system.
   4638     if (dst.is(src)) {
   4639       __ SmiUntag(src, dst);
   4640       __ eor(src, src, Operand(0x80000000));
   4641     }
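            // Recovery sketch for the aliased case above: tagging overflowed,
            // so dst holds src << 1 with bit 31 lost. SmiUntag shifts back
            // arithmetically and the EOR flips the wrong bit 31. Worked
            // example:
            //   0x40000000 --tag--> 0x80000000 --untag--> 0xC0000000
            //   0xC0000000 ^ 0x80000000 == 0x40000000 (original restored)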
   4642     __ vmov(dbl_scratch.low(), src);
   4643     __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
   4644   } else {
   4645     __ vmov(dbl_scratch.low(), src);
   4646     __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
   4647   }
   4648 
   4649   if (FLAG_inline_new) {
   4650     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4651     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
   4652     __ b(&done);
   4653   }
   4654 
   4655   // Slow case: Call the runtime system to do the number allocation.
   4656   __ bind(&slow);
   4657   {
   4658     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4659     // result register is stored, as this register is in the pointer map, but
   4660     // contains an integer value.
   4661     __ mov(dst, Operand::Zero());
   4662 
   4663     // Preserve the value of all registers.
   4664     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4665 
   4666     // NumberTagI and NumberTagD use the context from the frame, rather than
   4667     // the environment's HContext or HInlinedContext value.
   4668     // They only call Runtime::kHiddenAllocateHeapNumber.
   4669     // The corresponding HChange instructions are added in a phase that does
   4670     // not have easy access to the local context.
   4671     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4672     __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
   4673     RecordSafepointWithRegisters(
   4674         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4675     __ sub(r0, r0, Operand(kHeapObjectTag));
   4676     __ StoreToSafepointRegisterSlot(r0, dst);
   4677   }
   4678 
   4679   // Done. Store the double in dbl_scratch into the value field of the
   4680   // allocated heap number.
   4681   __ bind(&done);
   4682   __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
   4683   __ add(dst, dst, Operand(kHeapObjectTag));
   4684 }
   4685 
   4686 
   4687 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4688   class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   4689    public:
   4690     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4691         : LDeferredCode(codegen), instr_(instr) { }
   4692     virtual void Generate() V8_OVERRIDE {
   4693       codegen()->DoDeferredNumberTagD(instr_);
   4694     }
   4695     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4696    private:
   4697     LNumberTagD* instr_;
   4698   };
   4699 
   4700   DwVfpRegister input_reg = ToDoubleRegister(instr->value());
   4701   Register scratch = scratch0();
   4702   Register reg = ToRegister(instr->result());
   4703   Register temp1 = ToRegister(instr->temp());
   4704   Register temp2 = ToRegister(instr->temp2());
   4705 
   4706   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4707   if (FLAG_inline_new) {
   4708     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
   4709     // We want the untagged address first for performance.
   4710     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
   4711                           DONT_TAG_RESULT);
   4712   } else {
   4713     __ jmp(deferred->entry());
   4714   }
   4715   __ bind(deferred->exit());
   4716   __ vstr(input_reg, reg, HeapNumber::kValueOffset);
   4717   // Now that we have finished with the object's real address, tag it.
   4718   __ add(reg, reg, Operand(kHeapObjectTag));
   4719 }
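// Boxing sketch: DONT_TAG_RESULT makes AllocateHeapNumber return the raw
// untagged address, so the double is stored with a plain vstr at
// HeapNumber::kValueOffset and kHeapObjectTag (1) is added afterwards,
// saving an untag before the store.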
   4720 
   4721 
   4722 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4723   // TODO(3095996): Get rid of this. For now, we need to make the
   4724   // result register contain a valid pointer because it is already
   4725   // contained in the register pointer map.
   4726   Register reg = ToRegister(instr->result());
   4727   __ mov(reg, Operand::Zero());
   4728 
   4729   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   4730   // NumberTagI and NumberTagD use the context from the frame, rather than
   4731   // the environment's HContext or HInlinedContext value.
   4732   // They only call Runtime::kHiddenAllocateHeapNumber.
   4733   // The corresponding HChange instructions are added in a phase that does
   4734   // not have easy access to the local context.
   4735   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4736   __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
   4737   RecordSafepointWithRegisters(
   4738       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4739   __ sub(r0, r0, Operand(kHeapObjectTag));
   4740   __ StoreToSafepointRegisterSlot(r0, reg);
   4741 }
   4742 
   4743 
   4744 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4745   HChange* hchange = instr->hydrogen();
   4746   Register input = ToRegister(instr->value());
   4747   Register output = ToRegister(instr->result());
   4748   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4749       hchange->value()->CheckFlag(HValue::kUint32)) {
   4750     __ tst(input, Operand(0xc0000000));
   4751     DeoptimizeIf(ne, instr->environment());
   4752   }
   4753   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4754       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4755     __ SmiTag(output, input, SetCC);
   4756     DeoptimizeIf(vs, instr->environment());
   4757   } else {
   4758     __ SmiTag(output, input);
   4759   }
   4760 }
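// Range-check sketch for the uint32 path above: a 32-bit smi covers
// [-2^30, 2^30 - 1], so an unsigned value fits only when it is below 2^30,
// i.e. bits 30 and 31 are both clear -- hence `tst input, #0xC0000000`
// deoptimizing on ne.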
   4761 
   4762 
   4763 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4764   Register input = ToRegister(instr->value());
   4765   Register result = ToRegister(instr->result());
   4766   if (instr->needs_check()) {
   4767     STATIC_ASSERT(kHeapObjectTag == 1);
   4768     // If the input is a HeapObject, SmiUntag will set the carry flag.
   4769     __ SmiUntag(result, input, SetCC);
   4770     DeoptimizeIf(cs, instr->environment());
   4771   } else {
   4772     __ SmiUntag(result, input);
   4773   }
   4774 }
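// Untagging sketch (a rough expansion of SmiUntag with SetCC):
//
//   movs result, input, ASR #1   ; carry := shifted-out tag bit
//
// Smis have tag 0 and heap objects tag 1 (kHeapObjectTag), so the `cs`
// branch above means "the input was a pointer, not a smi".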
   4775 
   4776 
   4777 void LCodeGen::EmitNumberUntagD(Register input_reg,
   4778                                 DwVfpRegister result_reg,
   4779                                 bool can_convert_undefined_to_nan,
   4780                                 bool deoptimize_on_minus_zero,
   4781                                 LEnvironment* env,
   4782                                 NumberUntagDMode mode) {
   4783   Register scratch = scratch0();
   4784   SwVfpRegister flt_scratch = double_scratch0().low();
   4785   ASSERT(!result_reg.is(double_scratch0()));
   4786   Label convert, load_smi, done;
   4787   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4788     // Smi check.
   4789     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4790     // Heap number map check.
   4791     __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4792     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   4793     __ cmp(scratch, Operand(ip));
   4794     if (can_convert_undefined_to_nan) {
   4795       __ b(ne, &convert);
   4796     } else {
   4797       DeoptimizeIf(ne, env);
   4798     }
   4799     // Load the heap number value.
   4800     __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
   4801     if (deoptimize_on_minus_zero) {
   4802       __ VmovLow(scratch, result_reg);
   4803       __ cmp(scratch, Operand::Zero());
   4804       __ b(ne, &done);
   4805       __ VmovHigh(scratch, result_reg);
   4806       __ cmp(scratch, Operand(HeapNumber::kSignMask));
   4807       DeoptimizeIf(eq, env);
   4808     }
   4809     __ jmp(&done);
   4810     if (can_convert_undefined_to_nan) {
   4811       __ bind(&convert);
   4812       // Convert undefined (and hole) to NaN.
   4813       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4814       __ cmp(input_reg, Operand(ip));
   4815       DeoptimizeIf(ne, env);
   4816       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4817       __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
   4818       __ jmp(&done);
   4819     }
   4820   } else {
   4821     __ SmiUntag(scratch, input_reg);
   4822     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   4823   }
   4824   // Smi to double register conversion
   4825   __ bind(&load_smi);
   4826   // scratch: untagged value of input_reg
   4827   __ vmov(flt_scratch, scratch);
   4828   __ vcvt_f64_s32(result_reg, flt_scratch);
   4829   __ bind(&done);
   4830 }
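// Minus-zero sketch: -0.0 is encoded as a zero low word plus a high word
// equal to HeapNumber::kSignMask (only the sign bit set), which is exactly
// the pair of compares the deoptimize_on_minus_zero path above performs
// via VmovLow/VmovHigh.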
   4831 
   4832 
   4833 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4834   Register input_reg = ToRegister(instr->value());
   4835   Register scratch1 = scratch0();
   4836   Register scratch2 = ToRegister(instr->temp());
   4837   LowDwVfpRegister double_scratch = double_scratch0();
   4838   DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4839 
   4840   ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4841   ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4842 
   4843   Label done;
   4844 
   4845   // The input was optimistically untagged; revert it.
   4846   // The carry flag is set when we reach this deferred code as we just executed
   4847   // SmiUntag(heap_object, SetCC).
   4848   STATIC_ASSERT(kHeapObjectTag == 1);
   4849   __ adc(scratch2, input_reg, Operand(input_reg));
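          // Revert sketch: the optimistic SmiUntag left `input >> 1` in
          // input_reg and the shifted-out tag bit in the carry flag, so
          // `adc scratch2, input_reg, input_reg` (2 * input_reg + carry)
          // rebuilds the original tagged pointer without reloading it.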
   4850 
   4851   // Heap number map check.
   4852   __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
   4853   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   4854   __ cmp(scratch1, Operand(ip));
   4855 
   4856   if (instr->truncating()) {
   4857     // Performs a truncating conversion of a floating point number as used by
   4858     // the JS bitwise operations.
   4859     Label no_heap_number, check_bools, check_false;
   4860     __ b(ne, &no_heap_number);
   4861     __ TruncateHeapNumberToI(input_reg, scratch2);
   4862     __ b(&done);
   4863 
   4864     // Check for Oddballs. Undefined and False are converted to zero and
   4865     // True to one for truncating conversions.
   4866     __ bind(&no_heap_number);
   4867     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4868     __ cmp(scratch2, Operand(ip));
   4869     __ b(ne, &check_bools);
   4870     __ mov(input_reg, Operand::Zero());
   4871     __ b(&done);
   4872 
   4873     __ bind(&check_bools);
   4874     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
   4875     __ cmp(scratch2, Operand(ip));
   4876     __ b(ne, &check_false);
   4877     __ mov(input_reg, Operand(1));
   4878     __ b(&done);
   4879 
   4880     __ bind(&check_false);
   4881     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
   4882     __ cmp(scratch2, Operand(ip));
   4883     DeoptimizeIf(ne, instr->environment());
   4884     __ mov(input_reg, Operand::Zero());
   4885     __ b(&done);
   4886   } else {
   4887     // Deoptimize if we don't have a heap number.
   4888     DeoptimizeIf(ne, instr->environment());
   4889 
   4890     __ sub(ip, scratch2, Operand(kHeapObjectTag));
   4891     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
   4892     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
   4893     DeoptimizeIf(ne, instr->environment());
   4894 
   4895     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4896       __ cmp(input_reg, Operand::Zero());
   4897       __ b(ne, &done);
   4898       __ VmovHigh(scratch1, double_scratch2);
   4899       __ tst(scratch1, Operand(HeapNumber::kSignMask));
   4900       DeoptimizeIf(ne, instr->environment());
   4901     }
   4902   }
   4903   __ bind(&done);
   4904 }
   4905 
   4906 
   4907 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4908   class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   4909    public:
   4910     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4911         : LDeferredCode(codegen), instr_(instr) { }
   4912     virtual void Generate() V8_OVERRIDE {
   4913       codegen()->DoDeferredTaggedToI(instr_);
   4914     }
   4915     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   4916    private:
   4917     LTaggedToI* instr_;
   4918   };
   4919 
   4920   LOperand* input = instr->value();
   4921   ASSERT(input->IsRegister());
   4922   ASSERT(input->Equals(instr->result()));
   4923 
   4924   Register input_reg = ToRegister(input);
   4925 
   4926   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4927     __ SmiUntag(input_reg);
   4928   } else {
   4929     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4930 
   4931     // Optimistically untag the input.
   4932     // If the input is a HeapObject, SmiUntag will set the carry flag.
   4933     __ SmiUntag(input_reg, SetCC);
   4934     // Branch to deferred code if the input was tagged.
   4935     // The deferred code will take care of restoring the tag.
   4936     __ b(cs, deferred->entry());
   4937     __ bind(deferred->exit());
   4938   }
   4939 }
   4940 
   4941 
   4942 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4943   LOperand* input = instr->value();
   4944   ASSERT(input->IsRegister());
   4945   LOperand* result = instr->result();
   4946   ASSERT(result->IsDoubleRegister());
   4947 
   4948   Register input_reg = ToRegister(input);
   4949   DwVfpRegister result_reg = ToDoubleRegister(result);
   4950 
   4951   HValue* value = instr->hydrogen()->value();
   4952   NumberUntagDMode mode = value->representation().IsSmi()
   4953       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4954 
   4955   EmitNumberUntagD(input_reg, result_reg,
   4956                    instr->hydrogen()->can_convert_undefined_to_nan(),
   4957                    instr->hydrogen()->deoptimize_on_minus_zero(),
   4958                    instr->environment(),
   4959                    mode);
   4960 }
   4961 
   4962 
   4963 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4964   Register result_reg = ToRegister(instr->result());
   4965   Register scratch1 = scratch0();
   4966   DwVfpRegister double_input = ToDoubleRegister(instr->value());
   4967   LowDwVfpRegister double_scratch = double_scratch0();
   4968 
   4969   if (instr->truncating()) {
   4970     __ TruncateDoubleToI(result_reg, double_input);
   4971   } else {
   4972     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
   4973     // Deoptimize if the input wasn't an int32 (inside a double).
   4974     DeoptimizeIf(ne, instr->environment());
   4975     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4976       Label done;
   4977       __ cmp(result_reg, Operand::Zero());
   4978       __ b(ne, &done);
   4979       __ VmovHigh(scratch1, double_input);
   4980       __ tst(scratch1, Operand(HeapNumber::kSignMask));
   4981       DeoptimizeIf(ne, instr->environment());
   4982       __ bind(&done);
   4983     }
   4984   }
   4985 }
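// Exactness sketch (assuming the usual helper implementation):
// TryDoubleToInt32Exact converts to int32, converts back to double, and
// compares with the original, so `ne` above means the input had a
// fractional part or was out of int32 range.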
   4986 
   4987 
   4988 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4989   Register result_reg = ToRegister(instr->result());
   4990   Register scratch1 = scratch0();
   4991   DwVfpRegister double_input = ToDoubleRegister(instr->value());
   4992   LowDwVfpRegister double_scratch = double_scratch0();
   4993 
   4994   if (instr->truncating()) {
   4995     __ TruncateDoubleToI(result_reg, double_input);
   4996   } else {
   4997     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
   4998     // Deoptimize if the input wasn't an int32 (inside a double).
   4999     DeoptimizeIf(ne, instr->environment());
   5000     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5001       Label done;
   5002       __ cmp(result_reg, Operand::Zero());
   5003       __ b(ne, &done);
   5004       __ VmovHigh(scratch1, double_input);
   5005       __ tst(scratch1, Operand(HeapNumber::kSignMask));
   5006       DeoptimizeIf(ne, instr->environment());
   5007       __ bind(&done);
   5008     }
   5009   }
   5010   __ SmiTag(result_reg, SetCC);
   5011   DeoptimizeIf(vs, instr->environment());
   5012 }
   5013 
   5014 
   5015 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   5016   LOperand* input = instr->value();
   5017   __ SmiTst(ToRegister(input));
   5018   DeoptimizeIf(ne, instr->environment());
   5019 }
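// Tag-test sketch: SmiTst is `tst reg, #kSmiTagMask` with mask 1; `eq`
// (tag bit clear) means smi and `ne` means heap object, so the check above
// deoptimizes on ne and DoCheckNonSmi below deoptimizes on eq.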
   5020 
   5021 
   5022 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   5023   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   5024     LOperand* input = instr->value();
   5025     __ SmiTst(ToRegister(input));
   5026     DeoptimizeIf(eq, instr->environment());
   5027   }
   5028 }
   5029 
   5030 
   5031 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   5032   Register input = ToRegister(instr->value());
   5033   Register scratch = scratch0();
   5034 
   5035   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   5036   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   5037 
   5038   if (instr->hydrogen()->is_interval_check()) {
   5039     InstanceType first;
   5040     InstanceType last;
   5041     instr->hydrogen()->GetCheckInterval(&first, &last);
   5042 
   5043     __ cmp(scratch, Operand(first));
   5044 
   5045     // If there is only one type in the interval, check for equality.
   5046     if (first == last) {
   5047       DeoptimizeIf(ne, instr->environment());
   5048     } else {
   5049       DeoptimizeIf(lo, instr->environment());
   5050       // Omit the upper-bound check when the interval ends at LAST_TYPE.
   5051       if (last != LAST_TYPE) {
   5052         __ cmp(scratch, Operand(last));
   5053         DeoptimizeIf(hi, instr->environment());
   5054       }
   5055     }
   5056   } else {
   5057     uint8_t mask;
   5058     uint8_t tag;
   5059     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   5060 
   5061     if (IsPowerOf2(mask)) {
   5062       ASSERT(tag == 0 || IsPowerOf2(tag));
   5063       __ tst(scratch, Operand(mask));
   5064       DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
   5065     } else {
   5066       __ and_(scratch, scratch, Operand(mask));
   5067       __ cmp(scratch, Operand(tag));
   5068       DeoptimizeIf(ne, instr->environment());
   5069     }
   5070   }
   5071 }
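// Mask/tag sketch: when the mask has a single bit set, one
// `tst scratch, #mask` decides membership -- deoptimize on ne if the
// expected tag is 0, on eq otherwise; general masks need the and+cmp pair
// emitted above.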
   5072 
   5073 
   5074 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   5075   Register reg = ToRegister(instr->value());
   5076   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   5077   AllowDeferredHandleDereference smi_check;
   5078   if (isolate()->heap()->InNewSpace(*object)) {
   5080     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   5081     __ mov(ip, Operand(Handle<Object>(cell)));
   5082     __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
   5083     __ cmp(reg, ip);
   5084   } else {
   5085     __ cmp(reg, Operand(object));
   5086   }
   5087   DeoptimizeIf(ne, instr->environment());
   5088 }
   5089 
   5090 
   5091 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5092   {
   5093     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   5094     __ push(object);
   5095     __ mov(cp, Operand::Zero());
   5096     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   5097     RecordSafepointWithRegisters(
   5098         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   5099     __ StoreToSafepointRegisterSlot(r0, scratch0());
   5100   }
   5101   __ tst(scratch0(), Operand(kSmiTagMask));
   5102   DeoptimizeIf(eq, instr->environment());
   5103 }
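// Result-check sketch (an assumption about the runtime protocol):
// kTryMigrateInstance returns the migrated heap object on success and a
// smi sentinel on failure, so a smi result (tag bit clear, hence eq after
// the tst) deoptimizes.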
   5104 
   5105 
   5106 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5107   class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   5108    public:
   5109     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5110         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5111       SetExit(check_maps());
   5112     }
   5113     virtual void Generate() V8_OVERRIDE {
   5114       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5115     }
   5116     Label* check_maps() { return &check_maps_; }
   5117     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   5118    private:
   5119     LCheckMaps* instr_;
   5120     Label check_maps_;
   5121     Register object_;
   5122   };
   5123 
   5124   if (instr->hydrogen()->IsStabilityCheck()) {
   5125     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5126     for (int i = 0; i < maps->size(); ++i) {
   5127       AddStabilityDependency(maps->at(i).handle());
   5128     }
   5129     return;
   5130   }
   5131 
   5132   Register map_reg = scratch0();
   5133 
   5134   LOperand* input = instr->value();
   5135   ASSERT(input->IsRegister());
   5136   Register reg = ToRegister(input);
   5137 
   5138   __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   5139 
   5140   DeferredCheckMaps* deferred = NULL;
   5141   if (instr->hydrogen()->HasMigrationTarget()) {
   5142     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5143     __ bind(deferred->check_maps());
   5144   }
   5145 
   5146   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5147   Label success;
   5148   for (int i = 0; i < maps->size() - 1; i++) {
   5149     Handle<Map> map = maps->at(i).handle();
   5150     __ CompareMap(map_reg, map, &success);
   5151     __ b(eq, &success);
   5152   }
   5153 
   5154   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5155   __ CompareMap(map_reg, map, &success);
   5156   if (instr->hydrogen()->HasMigrationTarget()) {
   5157     __ b(ne, deferred->entry());
   5158   } else {
   5159     DeoptimizeIf(ne, instr->environment());
   5160   }
   5161 
   5162   __ bind(&success);
   5163 }
   5164 
   5165 
   5166 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5167   DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
   5168   Register result_reg = ToRegister(instr->result());
   5169   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
   5170 }
   5171 
   5172 
   5173 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5174   Register unclamped_reg = ToRegister(instr->unclamped());
   5175   Register result_reg = ToRegister(instr->result());
   5176   __ ClampUint8(result_reg, unclamped_reg);
   5177 }
   5178 
   5179 
   5180 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5181   Register scratch = scratch0();
   5182   Register input_reg = ToRegister(instr->unclamped());
   5183   Register result_reg = ToRegister(instr->result());
   5184   DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
   5185   Label is_smi, done, heap_number;
   5186 
   5187   // Both smi and heap number cases are handled.
   5188   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
   5189 
   5190   // Check for heap number
   5191   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5192   __ cmp(scratch, Operand(factory()->heap_number_map()));
   5193   __ b(eq, &heap_number);
   5194 
   5195   // Check for undefined. Undefined is converted to zero for clamping
   5196   // conversions.
   5197   __ cmp(input_reg, Operand(factory()->undefined_value()));
   5198   DeoptimizeIf(ne, instr->environment());
   5199   __ mov(result_reg, Operand::Zero());
   5200   __ jmp(&done);
   5201 
   5202   // Heap number
   5203   __ bind(&heap_number);
   5204   __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   5205   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   5206   __ jmp(&done);
   5207 
   5208   // smi
   5209   __ bind(&is_smi);
   5210   __ ClampUint8(result_reg, result_reg);
   5211 
   5212   __ bind(&done);
   5213 }
   5214 
   5215 
   5216 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   5217   DwVfpRegister value_reg = ToDoubleRegister(instr->value());
   5218   Register result_reg = ToRegister(instr->result());
   5219   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
   5220     __ VmovHigh(result_reg, value_reg);
   5221   } else {
   5222     __ VmovLow(result_reg, value_reg);
   5223   }
   5224 }
   5225 
   5226 
   5227 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
   5228   Register hi_reg = ToRegister(instr->hi());
   5229   Register lo_reg = ToRegister(instr->lo());
   5230   DwVfpRegister result_reg = ToDoubleRegister(instr->result());
   5231   __ VmovHigh(result_reg, hi_reg);
   5232   __ VmovLow(result_reg, lo_reg);
   5233 }
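// Bit-layout sketch: a double register holds the IEEE-754 value as two
// 32-bit words; VmovHigh writes the sign/exponent/mantissa-high word and
// VmovLow the mantissa-low word, so the pair (hi, lo) is reassembled
// verbatim, mirroring DoDoubleBits above.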
   5234 
   5235 
   5236 void LCodeGen::DoAllocate(LAllocate* instr) {
   5237   class DeferredAllocate V8_FINAL : public LDeferredCode {
   5238    public:
   5239     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5240         : LDeferredCode(codegen), instr_(instr) { }
   5241     virtual void Generate() V8_OVERRIDE {
   5242       codegen()->DoDeferredAllocate(instr_);
   5243     }
   5244     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   5245    private:
   5246     LAllocate* instr_;
   5247   };
   5248 
   5249   DeferredAllocate* deferred =
   5250       new(zone()) DeferredAllocate(this, instr);
   5251 
   5252   Register result = ToRegister(instr->result());
   5253   Register scratch = ToRegister(instr->temp1());
   5254   Register scratch2 = ToRegister(instr->temp2());
   5255 
   5256   // Allocate memory for the object.
   5257   AllocationFlags flags = TAG_OBJECT;
   5258   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5259     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5260   }
   5261   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5262     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5263     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5264     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   5265   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5266     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5267     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   5268   }
   5269 
   5270   if (instr->size()->IsConstantOperand()) {
   5271     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5272     if (size <= Page::kMaxRegularHeapObjectSize) {
   5273       __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5274     } else {
   5275       __ jmp(deferred->entry());
   5276     }
   5277   } else {
   5278     Register size = ToRegister(instr->size());
   5279     __ Allocate(size,
   5280                 result,
   5281                 scratch,
   5282                 scratch2,
   5283                 deferred->entry(),
   5284                 flags);
   5285   }
   5286 
   5287   __ bind(deferred->exit());
   5288 
   5289   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5290     if (instr->size()->IsConstantOperand()) {
   5291       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5292       __ mov(scratch, Operand(size));
   5293     } else {
   5294       scratch = ToRegister(instr->size());
   5295     }
   5296     __ sub(scratch, scratch, Operand(kPointerSize));
   5297     __ sub(result, result, Operand(kHeapObjectTag));
   5298     Label loop;
   5299     __ bind(&loop);
   5300     __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
   5301     __ str(scratch2, MemOperand(result, scratch));
   5302     __ sub(scratch, scratch, Operand(kPointerSize));
   5303     __ cmp(scratch, Operand(0));
   5304     __ b(ge, &loop);
   5305     __ add(result, result, Operand(kHeapObjectTag));
   5306   }
   5307 }
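// Prefill sketch: the filler loop above walks `scratch` from
// size - kPointerSize down to 0, storing the one-pointer filler map at
// [result + scratch], so until the real fields are written the
// uninitialized region parses as a run of valid filler objects for the GC.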
   5308 
   5309 
   5310 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5311   Register result = ToRegister(instr->result());
   5312 
   5313   // TODO(3095996): Get rid of this. For now, we need to make the
   5314   // result register contain a valid pointer because it is already
   5315   // contained in the register pointer map.
   5316   __ mov(result, Operand(Smi::FromInt(0)));
   5317 
   5318   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   5319   if (instr->size()->IsRegister()) {
   5320     Register size = ToRegister(instr->size());
   5321     ASSERT(!size.is(result));
   5322     __ SmiTag(size);
   5323     __ push(size);
   5324   } else {
   5325     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5326     if (size >= 0 && size <= Smi::kMaxValue) {
   5327       __ Push(Smi::FromInt(size));
   5328     } else {
   5329       // We should never get here at runtime, so abort.
   5330       __ stop("invalid allocation size");
   5331       return;
   5332     }
   5333   }
   5334 
   5335   int flags = AllocateDoubleAlignFlag::encode(
   5336       instr->hydrogen()->MustAllocateDoubleAligned());
   5337   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5338     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5339     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5340     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   5341   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5342     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5343     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   5344   } else {
   5345     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5346   }
   5347   __ Push(Smi::FromInt(flags));
   5348 
   5349   CallRuntimeFromDeferred(
   5350       Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
   5351   __ StoreToSafepointRegisterSlot(r0, result);
   5352 }
   5353 
   5354 
   5355 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   5356   ASSERT(ToRegister(instr->value()).is(r0));
   5357   __ push(r0);
   5358   CallRuntime(Runtime::kToFastProperties, 1, instr);
   5359 }
   5360 
   5361 
   5362 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   5363   ASSERT(ToRegister(instr->context()).is(cp));
   5364   Label materialized;
   5365   // Registers will be used as follows:
   5366   // r6 = literals array.
   5367   // r1 = regexp literal.
   5368   // r0 = regexp literal clone.
   5369   // r2-r5 are used as temporaries.
   5370   int literal_offset =
   5371       FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
   5372   __ Move(r6, instr->hydrogen()->literals());
   5373   __ ldr(r1, FieldMemOperand(r6, literal_offset));
   5374   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   5375   __ cmp(r1, ip);
   5376   __ b(ne, &materialized);
   5377 
   5378   // Create the regexp literal using the runtime function;
   5379   // the result will be in r0.
   5380   __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
   5381   __ mov(r4, Operand(instr->hydrogen()->pattern()));
   5382   __ mov(r3, Operand(instr->hydrogen()->flags()));
   5383   __ Push(r6, r5, r4, r3);
   5384   CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
   5385   __ mov(r1, r0);
   5386 
   5387   __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast-case closure allocation code that allocates in new space
  // for nested functions that don't require the literals to be cloned.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
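    // Slow case: allocate the closure in the runtime, passing the pretenure
    // flag explicitly.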
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}

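// Emits the code for comparing typeof |input| against |type_name|. Returns
// the condition on which the final branch should be taken, or kNoCondition
// if control has already been transferred to |false_label|.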
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}

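// Sets the condition flags so that 'eq' holds exactly when the calling frame
// is a construct frame. Clobbers temp1 and temp2.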
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for the duration of the padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

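// Slow path for the stack check: saves the registers, calls the hidden
// StackGuard runtime function, and records the safepoint for lazy
// deoptimization.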
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
    PredictableCodeSizeScope predictable(masm(),
        CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
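  // The object to enumerate over is in r0. Deoptimize if it is undefined,
  // null, a Smi, or a proxy.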
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
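  // Use the enum cache if it is valid for the whole prototype chain;
  // otherwise call into the runtime to collect the property names.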
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
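  // A zero enum length means there is nothing to enumerate; use the empty
  // fixed array instead of the enum cache.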
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(object);
  __ Push(index);
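  // This runtime call does not need a context; clear cp so the safepoint
  // does not record a stale context value (a Smi zero is GC-safe).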
  __ mov(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

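  // Bit 0 of the Smi-encoded index marks a mutable double field; those are
  // handled in deferred code. Shifting the flag bit out leaves the Smi field
  // index, whose sign selects in-object versus out-of-object storage.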
  __ tst(index, Operand(Smi::FromInt(1)));
  __ b(ne, deferred->entry());
  __ mov(index, Operand(index, ASR, 1));

  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal