// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
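// The CallWrapper hooks below give us a callback right after the call
// instruction has been emitted, which is where the safepoint's pc must be
// recorded.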
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {}

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->
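// "__" is shorthand for masm()-> so the code-generation sequences below read
// like an instruction stream: e.g. "__ mov(eax, Immediate(1))" emits
// "mov eax, 1" via the current MacroAssembler.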

bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);

  CodeStub::GenerateFPStubs();

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartArrayPointer<char> name(
        info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack-allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  // Strict mode functions and builtins need to replace the receiver
  // with undefined when called as functions (without an explicit
  // receiver object). ecx is zero for method calls and non-zero for
  // function calls.
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ test(ecx, Operand(ecx));
    __ j(zero, &ok, Label::kNear);
    // +1 for return address.
    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
    __ mov(Operand(esp, receiver_offset),
           Immediate(isolate()->factory()->undefined_value()));
    __ bind(&ok);
  }

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
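      // For example, reserving 3000 slots (12000 bytes) writes to esp+7904
      // and esp+3808 below, touching each freshly committed 4 KB page once.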
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both eax and esi.  It replaces the context
    // passed to us.  It's saved on the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        __ RecordWriteContextSlot(esi,
                                  context_offset,
                                  eax,
                                  ebx,
                                  kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  return value->Number();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


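// Worked example for the offset arithmetic below (ia32, kPointerSize == 4):
// spill slot 0 maps to Operand(ebp, -12), below the saved ebp, context, and
// JS function pushed in the prologue, while incoming parameter index -1 maps
// to Operand(ebp, +8), just above the return address at ebp+4.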
Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  int offset = (index >= 0) ? index + 3 : index - 1;
  return Operand(ebp, -offset * kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  __ CallRuntime(fun, argc);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    Handle<Object> literal =
        chunk_->LookupLiteral(LConstantOperand::cast(context));
    __ LoadHeapObject(esi, Handle<Context>::cast(literal));
  } else {
    UNREACHABLE();
  }

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      Label done;
      __ j(NegateCondition(cc), &done, Label::kNear);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    if (divisor < 0) divisor = -divisor;

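    // The divisor is now a positive power of two, so the remainder is just
    // the low bits: mask with (divisor - 1). JS '%' takes the sign of the
    // dividend, so negative dividends are negated around the mask:
    // e.g. -7 % 4 == -(7 & 3) == -3.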
    Label positive_dividend, done;
    __ test(dividend, Operand(dividend));
    __ j(not_sign, &positive_dividend, Label::kNear);
    __ neg(dividend);
    __ and_(dividend, divisor - 1);
    __ neg(dividend);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ j(not_zero, &done, Label::kNear);
      DeoptimizeIf(no_condition, instr->environment());
    } else {
      __ jmp(&done, Label::kNear);
    }
    __ bind(&positive_dividend);
    __ and_(dividend, divisor - 1);
    __ bind(&done);
  } else {
    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
    Register left_reg = ToRegister(instr->InputAt(0));
    Register right_reg = ToRegister(instr->InputAt(1));
    Register result_reg = ToRegister(instr->result());

    ASSERT(left_reg.is(eax));
    ASSERT(result_reg.is(edx));
    ASSERT(!right_reg.is(eax));
    ASSERT(!right_reg.is(edx));

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ test(right_reg, Operand(right_reg));
      DeoptimizeIf(zero, instr->environment());
    }

    __ test(left_reg, Operand(left_reg));
    __ j(zero, &remainder_eq_dividend, Label::kNear);
    __ j(sign, &slow, Label::kNear);

    __ test(right_reg, Operand(right_reg));
    __ j(not_sign, &both_positive, Label::kNear);
    // The sign of the divisor doesn't matter.
    __ neg(right_reg);

    __ bind(&both_positive);
    // If the dividend is smaller than the nonnegative
    // divisor, the dividend is the result.
    __ cmp(left_reg, Operand(right_reg));
    __ j(less, &remainder_eq_dividend, Label::kNear);
    // Check if the divisor is a power-of-two integer.
    Register scratch = ToRegister(instr->TempAt(0));
    __ mov(scratch, right_reg);
    __ sub(Operand(scratch), Immediate(1));
    __ test(scratch, Operand(right_reg));
    __ j(not_zero, &do_subtraction, Label::kNear);
    __ and_(left_reg, Operand(scratch));
    __ jmp(&remainder_eq_dividend, Label::kNear);

    __ bind(&do_subtraction);
    const int kUnfolds = 3;
    // Try a few subtractions of the dividend.
    __ mov(scratch, left_reg);
    for (int i = 0; i < kUnfolds; i++) {
      // Reduce the dividend by the divisor.
      __ sub(left_reg, Operand(right_reg));
      // Check if the dividend is less than the divisor.
      __ cmp(left_reg, Operand(right_reg));
      __ j(less, &remainder_eq_dividend, Label::kNear);
    }
    __ mov(left_reg, scratch);

    // Slow case, using idiv instruction.
    __ bind(&slow);
    // Sign extend to edx.
    __ cdq();
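    // (cdq sign-extends eax into edx:eax, forming the 64-bit dividend that
    // idiv expects.)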

    // Check for (0 % -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      Label done;
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &positive_left, Label::kNear);
      __ idiv(right_reg);

      // Test the remainder for 0, because then the result would be -0.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);

      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idiv(right_reg);
      __ bind(&done);
    } else {
      __ idiv(right_reg);
    }
    __ jmp(&done, Label::kNear);

    __ bind(&remainder_eq_dividend);
    __ mov(result_reg, left_reg);

    __ bind(&done);
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right = instr->InputAt(1);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->TempAt(0)), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
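    // E.g. x * 3 becomes "lea left, [left + left*2]" and x * 8 becomes
    // "shl left, 3"; unlike imul, these do not reliably set the overflow
    // flag, hence the kCanOverflow guard below.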
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
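    // Masking the count with 0x1F matches both the hardware (ia32 shifts use
    // the count modulo 32) and the JS spec, which masks shift counts by 31.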
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
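          // A shift count of 0 leaves the sign bit intact, so a negative
          // input would be a uint32 result above kMaxInt that cannot be
          // represented as an int32; deoptimize in that case.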
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToInteger32Immediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
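  // (+0.0 and -0.0 compare equal as doubles, but only +0.0 has the all-zero
  // bit pattern; -0.0 has the sign bit set, so the uint64_t comparison below
  // tells them apart.)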
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorps(res, res);
  } else {
    Register temp = ToRegister(instr->TempAt(0));
    uint64_t int_val = BitCast<uint64_t, double>(v);
    int32_t lower = static_cast<int32_t>(int_val);
    int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope scope(SSE4_1);
      if (lower != 0) {
        __ Set(temp, Immediate(lower));
        __ movd(res, Operand(temp));
        __ Set(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      } else {
        __ xorps(res, res);
        __ Set(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      }
    } else {
      __ Set(temp, Immediate(upper));
      __ movd(res, Operand(temp));
      __ psllq(res, 32);
      if (lower != 0) {
        __ Set(temp, Immediate(lower));
        __ movd(xmm0, Operand(temp));
        __ por(res, xmm0);
      }
    }
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> handle = instr->value();
  if (handle->IsHeapObject()) {
    __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
  } else {
    __ Set(reg, Immediate(handle));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(
    LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following masking takes care of that anyway.
  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(result, Map::kElementsKindMask);
  __ shr(result, Map::kElementsKindShift);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  ASSERT(input.is(result));

  Label done;
  // If the object is a smi, return the object.
  __ JumpIfSmi(input, &done, Label::kNear);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done, Label::kNear);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(eax));

#ifdef DEBUG
  __ AbortIfSmi(object);
  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
  __ Assert(equal, "Trying to get date field from non-date.");
#endif

  if (index->value() == 0) {
    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand::StaticVariable(stamp));
      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->value()));
  ASSERT(ToRegister(instr->context()).is(esi));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToInteger32Immediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
  XMMRegister result = ToDoubleRegister(instr->result());
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), left);
      __ movdbl(Operand(esp, 1 * kDoubleSize), right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          4);

      // Return value is in st(0) on ia32.
      // Store it into the (fixed) result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(result, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
   1377 
   1378 
   1379 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   1380   ASSERT(ToRegister(instr->context()).is(esi));
   1381   ASSERT(ToRegister(instr->left()).is(edx));
   1382   ASSERT(ToRegister(instr->right()).is(eax));
   1383   ASSERT(ToRegister(instr->result()).is(eax));
   1384 
   1385   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   1386   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   1387   __ nop();  // Signals no inlined code.
   1388 }
   1389 
   1390 
   1391 int LCodeGen::GetNextEmittedBlock(int block) {
   1392   for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
   1393     LLabel* label = chunk_->GetLabel(i);
   1394     if (!label->HasReplacement()) return i;
   1395   }
   1396   return -1;
   1397 }
   1398 
   1399 
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ test(reg, Operand(reg));
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
    __ xorps(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ cmp(reg, factory()->true_value());
      EmitBranch(true_block, false_block, equal);
    } else if (type.IsSmi()) {
      __ test(reg, Operand(reg));
      EmitBranch(true_block, false_block, not_equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();

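      // The checks below implement ECMA-262 ToBoolean for the type classes we
      // expect here: undefined, null, false, 0, NaN, and the empty string are
      // falsy; everything else, including any object that is not
      // undetectable, is truthy.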
      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ cmp(reg, factory()->true_value());
        __ j(equal, true_label);
        // false -> false.
        __ cmp(reg, factory()->false_value());
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, false_label);
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected.NeedsMap()) {
        map = ToRegister(instr->TempAt(0));
        ASSERT(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    1 << Map::kIsUndetectable);
          __ j(not_zero, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, true_label);
        __ jmp(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ fldz();
        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
        __ FCmp();
        __ j(zero, false_label);
        __ jmp(true_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(no_condition, instr->environment());
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
      EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                       : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
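      // ucomisd sets the parity flag when either operand is NaN (unordered).
      // Any JS comparison involving NaN is false, e.g. (NaN < 1) and
      // (NaN == NaN) both evaluate to false.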
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToRegister(left), ToInteger32Immediate(right));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right), ToInteger32Immediate(left));
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(true_block, false_block, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Operand right = ToOperand(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmp(left, instr->hydrogen()->right());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

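  // Non-strict 'x == null' is true for null, undefined, and undetectable
  // objects, e.g. (undefined == null) -> true but (0 == null) -> false.
  // Strict equality matches only the nil value itself.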
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Handle<Object> nil_value = instr->nil() == kNullValue ?
      factory()->null_value() :
      factory()->undefined_value();
  __ cmp(reg, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
        factory()->undefined_value() :
        factory()->null_value();
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ cmp(reg, other_nil_value);
    __ j(equal, true_label);
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->TempAt(0));
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


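// The JS 'is object' test: null counts as an object, undetectable objects do
// not, and otherwise the instance type must fall in the non-callable
// spec-object range, which excludes functions.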
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ cmp(input, isolate()->factory()->null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, is_not_object);

  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsString(reg, temp, false_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

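  // On ia32 a smi is a 31-bit integer shifted left by one; its low (tag) bit
  // is 0, so testing against kSmiTagMask sets ZF exactly for smis.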
  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(true_block, false_block, zero);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(true_block, false_block, not_zero);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  __ test(eax, Operand(eax));

  EmitBranch(true_block, false_block, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}


// Branches to a label or falls through with the answer in the z flag.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // Given the assertions below, we can use the same compares to test for
    // both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST_NONCALLABLE_SPEC_OBJECT_TYPE to
  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are symbols it is sufficient to use an identity
  // comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  // Object and function are in fixed registers defined by the stub.
  ASSERT(ToRegister(instr->context()).is(esi));
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(1));
  Register temp = ToRegister(instr->TempAt(0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = ToRegister(instr->TempAt(0));
  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<JSGlobalPropertyCell> cache_cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ cmp(map, Operand::Cell(cache_cell));  // Patched to cached map.
  __ j(not_equal, &cache_miss, Label::kNear);
  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ cmp(object, factory()->null_value());
  __ j(equal, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
  __ j(is_string, &false_result);

  // Go to the deferred code.
  __ jmp(deferred->entry());

  __ bind(&false_result);
  __ mov(ToRegister(instr->result()), factory()->false_value());

  // At this point result holds either the true or the false value; the
  // deferred code also produces a true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  PushSafepointRegistersScope scope(this);

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  // Get the temp register reserved by the instruction. This needs to be a
  // register that is pushed last by PushSafepointRegisters, as the top of the
  // stack is used to pass the offset of the map check to the stub.
  Register temp = ToRegister(instr->TempAt(0));
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
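  // kAdditionalDelta covers the code emitted between this point and the stub
  // call below, so delta should equal the distance from the map-check patch
  // site to the call's return address; the constant has to stay in sync with
  // that instruction sequence.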
  static const int kAdditionalDelta = 13;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasDeoptimizationEnvironment());
  LEnvironment* env = instr->deoptimization_environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register.  We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ mov(esp, ebp);
  __ pop(ebp);
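  // Return and drop the parameters plus the receiver (hence the +1) from the
  // caller's stack; ecx serves only as a scratch register for the return
  // sequence.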
  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }

  // Store the value.
  __ mov(Operand::Cell(cell_handle), value);
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
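    // Storing a smi never creates a pointer the GC has to track, so the
    // barrier's inline smi check is only omitted when the value is statically
    // known to be a heap object.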
    Register temp = ToRegister(instr->TempAt(0));
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              temp,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name) {
  LookupResult lookup(isolate());
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() &&
         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
  if (lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ mov(result, FieldOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
      __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  }
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());

  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();
  if (map_count == 0) {
    ASSERT(instr->hydrogen()->need_generic());
    __ mov(ecx, name);
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    Label done;
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
      __ j(not_equal, &next, Label::kNear);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ jmp(&done, Label::kNear);
      __ bind(&next);
    }
    Handle<Map> map = instr->hydrogen()->types()->last();
    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ j(not_equal, &generic, Label::kNear);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ jmp(&done, Label::kNear);
      __ bind(&generic);
      __ mov(ecx, name);
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      DeoptimizeIf(not_equal, instr->environment());
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
            1 << Map::kHasNonInstancePrototype);
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, ok, fail;
    __ cmp(FieldOperand(result, HeapObject::kMapOffset),
           Immediate(factory()->fixed_array_map()));
    __ j(equal, &done, Label::kNear);
    __ cmp(FieldOperand(result, HeapObject::kMapOffset),
           Immediate(factory()->fixed_cow_array_map()));
    __ j(equal, &done, Label::kNear);
    Register temp((result.is(eax)) ? ebx : eax);
    __ push(temp);
    __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
    __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
    __ and_(temp, Map::kElementsKindMask);
    __ shr(temp, Map::kElementsKindShift);
    __ cmp(temp, FAST_ELEMENTS);
    __ j(equal, &ok, Label::kNear);
    __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
    __ j(less, &fail, Label::kNear);
    __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
    __ j(less_equal, &ok, Label::kNear);
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&ok);
    __ pop(temp);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(input,
                              ExternalArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Operand index = ToOperand(instr->index());
  Register result = ToRegister(instr->result());

  __ sub(length, index);
  DeoptimizeIf(below_equal, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting the index from the length accounts for one of them; the
  // kPointerSize displacement adds the other.
  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(), instr->key(),
                               FAST_ELEMENTS,
                               FixedArray::kHeaderSize - kHeapObjectTag));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  XMMRegister result = ToDoubleRegister(instr->result());

  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      sizeof(kHoleNanLower32);
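  // The hole in a double array is a NaN with a distinguished bit pattern;
  // checking its upper 32 bits (hence the sizeof(kHoleNanLower32) added to
  // the offset) is enough to tell it apart from every other double.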
  Operand hole_check_operand = BuildFastArrayOperand(
      instr->elements(), instr->key(),
      FAST_DOUBLE_ELEMENTS,
      offset);
  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
  DeoptimizeIf(equal, instr->environment());

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ movdbl(result, double_load_operand);
}


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    ElementsKind elements_kind,
    uint32_t offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
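  // The element size is 1 << shift_size bytes, which maps directly onto an
  // ia32 SIB scale factor (1, 2, 4, or 8), so a keyed access becomes a single
  // [base + key * size + offset] operand.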
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort("array index constant value too big");
    }
    return Operand(elements_pointer_reg,
                   constant_value * (1 << shift_size) + offset);
  } else {
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
  }
}


void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
                                        instr->key(), elements_kind, 0));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movdbl(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ mov(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ mov(result, operand);
        __ test(result, Operand(result));
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generates code that returns a double rather than an
        // int.
        DeoptimizeIf(negative, instr->environment());
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(eax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  // Check for an arguments adaptor frame.
  Label done, adapted;
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(result),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adapted, Label::kNear);

  // No arguments adaptor frame.
  __ mov(result, Operand(ebp));
  __ jmp(&done, Label::kNear);

  // Arguments adaptor frame present.
  __ bind(&adapted);
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ bind(&done);
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = ToRegister(instr->TempAt(0));

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
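  // For example, in sloppy mode f.call(null) runs f with the global object as
  // 'this', while in strict mode f observes 'this' === null.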
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ mov(scratch,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Do not transform the receiver to object for builtins.
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
            1 << SharedFunctionInfo::kNativeBitWithinByte);
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object.  See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
  __ mov(receiver,
         FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(eax));  // Used for parameter count.
  ASSERT(function.is(edi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());
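  // Bailing out to the unoptimized code for overly long argument lists bounds
  // the stack space consumed by the push loop below.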

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadHeapObject(result, instr->hydrogen()->closure());
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(esi));
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;
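  // A direct call can skip the arguments adaptor frame only when the callee
  // either never needs adaptation or declares exactly 'arity' parameters.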
   2735 
   2736   LPointerMap* pointers = instr->pointer_map();
   2737   RecordPosition(pointers->position());
   2738 
   2739   if (can_invoke_directly) {
   2740     __ LoadHeapObject(edi, function);
   2741 
   2742     // Change context if needed.
   2743     bool change_context =
   2744         (info()->closure()->context() != function->context()) ||
   2745         scope()->contains_with() ||
   2746         (scope()->num_heap_slots() > 0);
   2747 
   2748     if (change_context) {
   2749       __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   2750     } else {
   2751       __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   2752     }
   2753 
2754     // Set eax to the argument count if adaptation is not needed. Assumes
2755     // that eax is available to write to at this point.
   2756     if (!function->NeedsArgumentsAdaption()) {
   2757       __ mov(eax, arity);
   2758     }
   2759 
   2760     // Invoke function directly.
   2761     __ SetCallKind(ecx, call_kind);
   2762     if (*function == *info()->closure()) {
   2763       __ CallSelf();
   2764     } else {
   2765       __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
   2766     }
   2767     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   2768   } else {
   2769     // We need to adapt arguments.
   2770     SafepointGenerator generator(
   2771         this, pointers, Safepoint::kLazyDeopt);
   2772     ParameterCount count(arity);
   2773     __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
   2774   }
   2775 }
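
// A known function can skip the arguments adaptor exactly when no
// adaptation could be required: either the function does not need
// arguments adaptation at all, or the actual argument count already
// equals the declared parameter count. The predicate above, restated as
// a stand-alone sketch (CanInvokeDirectly is a hypothetical helper, not
// part of this file):
static bool CanInvokeDirectly(bool needs_arguments_adaption,
                              int formal_parameter_count,
                              int arity) {
  return !needs_arguments_adaption || formal_parameter_count == arity;
}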
   2776 
   2777 
   2778 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   2779   ASSERT(ToRegister(instr->result()).is(eax));
   2780   CallKnownFunction(instr->function(),
   2781                     instr->arity(),
   2782                     instr,
   2783                     CALL_AS_METHOD);
   2784 }
   2785 
   2786 
   2787 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
   2788   Register input_reg = ToRegister(instr->value());
   2789   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   2790          factory()->heap_number_map());
   2791   DeoptimizeIf(not_equal, instr->environment());
   2792 
   2793   Label done;
   2794   Register tmp = input_reg.is(eax) ? ecx : eax;
   2795   Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
   2796 
   2797   // Preserve the value of all registers.
   2798   PushSafepointRegistersScope scope(this);
   2799 
   2800   Label negative;
   2801   __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   2802   // Check the sign of the argument. If the argument is positive, just
   2803   // return it. We do not need to patch the stack since |input| and
   2804   // |result| are the same register and |input| will be restored
   2805   // unchanged by popping safepoint registers.
   2806   __ test(tmp, Immediate(HeapNumber::kSignMask));
   2807   __ j(not_zero, &negative);
   2808   __ jmp(&done);
   2809 
   2810   __ bind(&negative);
   2811 
   2812   Label allocated, slow;
   2813   __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
   2814   __ jmp(&allocated);
   2815 
   2816   // Slow case: Call the runtime system to do the number allocation.
   2817   __ bind(&slow);
   2818 
   2819   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
   2820                           instr, instr->context());
   2821 
   2822   // Set the pointer to the new heap number in tmp.
   2823   if (!tmp.is(eax)) __ mov(tmp, eax);
   2824 
   2825   // Restore input_reg after call to runtime.
   2826   __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
   2827 
   2828   __ bind(&allocated);
   2829   __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   2830   __ and_(tmp2, ~HeapNumber::kSignMask);
   2831   __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
   2832   __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
   2833   __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
   2834   __ StoreToSafepointRegisterSlot(input_reg, tmp);
   2835 
   2836   __ bind(&done);
   2837 }
   2838 
   2839 
   2840 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
   2841   Register input_reg = ToRegister(instr->value());
   2842   __ test(input_reg, Operand(input_reg));
   2843   Label is_positive;
   2844   __ j(not_sign, &is_positive);
   2845   __ neg(input_reg);
   2846   __ test(input_reg, Operand(input_reg));
   2847   DeoptimizeIf(negative, instr->environment());
   2848   __ bind(&is_positive);
   2849 }
   2850 
   2851 
   2852 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
   2853   // Class for deferred case.
   2854   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   2855    public:
   2856     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
   2857                                     LUnaryMathOperation* instr)
   2858         : LDeferredCode(codegen), instr_(instr) { }
   2859     virtual void Generate() {
   2860       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   2861     }
   2862     virtual LInstruction* instr() { return instr_; }
   2863    private:
   2864     LUnaryMathOperation* instr_;
   2865   };
   2866 
   2867   ASSERT(instr->value()->Equals(instr->result()));
   2868   Representation r = instr->hydrogen()->value()->representation();
   2869 
   2870   if (r.IsDouble()) {
2871     XMMRegister scratch = xmm0;
   2872     XMMRegister input_reg = ToDoubleRegister(instr->value());
   2873     __ xorps(scratch, scratch);
   2874     __ subsd(scratch, input_reg);
   2875     __ pand(input_reg, scratch);
   2876   } else if (r.IsInteger32()) {
   2877     EmitIntegerMathAbs(instr);
   2878   } else {  // Tagged case.
   2879     DeferredMathAbsTaggedHeapNumber* deferred =
   2880         new DeferredMathAbsTaggedHeapNumber(this, instr);
   2881     Register input_reg = ToRegister(instr->value());
   2882     // Smi check.
   2883     __ JumpIfNotSmi(input_reg, deferred->entry());
   2884     EmitIntegerMathAbs(instr);
   2885     __ bind(deferred->exit());
   2886   }
   2887 }
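
// Note on the double path of DoMathAbs above: subsd computes
// scratch = 0.0 - input, which flips the sign bit of every nonzero
// input, and pand then keeps only the bits on which input and -input
// agree, i.e. everything except the IEEE-754 sign bit. A host-side
// sketch of the same trick (illustrative only; AbsViaSignBit is a
// hypothetical helper assuming <stdint.h> and <cstring> are available):
static double AbsViaSignBit(double x) {
  uint64_t bits;
  memcpy(&bits, &x, sizeof(bits));
  bits &= ~(static_cast<uint64_t>(1) << 63);  // Clear the sign bit.
  memcpy(&x, &bits, sizeof(bits));
  return x;  // AbsViaSignBit(-0.0) == +0.0, matching pand(x, -x).
}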
   2888 
   2889 
   2890 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
   2891   XMMRegister xmm_scratch = xmm0;
   2892   Register output_reg = ToRegister(instr->result());
   2893   XMMRegister input_reg = ToDoubleRegister(instr->value());
   2894 
   2895   if (CpuFeatures::IsSupported(SSE4_1)) {
   2896     CpuFeatures::Scope scope(SSE4_1);
   2897     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2898       // Deoptimize on negative zero.
   2899       Label non_zero;
   2900       __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
   2901       __ ucomisd(input_reg, xmm_scratch);
   2902       __ j(not_equal, &non_zero, Label::kNear);
   2903       __ movmskpd(output_reg, input_reg);
   2904       __ test(output_reg, Immediate(1));
   2905       DeoptimizeIf(not_zero, instr->environment());
   2906       __ bind(&non_zero);
   2907     }
   2908     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
   2909     __ cvttsd2si(output_reg, Operand(xmm_scratch));
   2910     // Overflow is signalled with minint.
   2911     __ cmp(output_reg, 0x80000000u);
   2912     DeoptimizeIf(equal, instr->environment());
   2913   } else {
   2914     Label done;
   2915     // Deoptimize on negative numbers.
   2916     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
   2917     __ ucomisd(input_reg, xmm_scratch);
   2918     DeoptimizeIf(below, instr->environment());
   2919 
   2920     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2921       // Check for negative zero.
   2922       Label positive_sign;
   2923       __ j(above, &positive_sign, Label::kNear);
   2924       __ movmskpd(output_reg, input_reg);
   2925       __ test(output_reg, Immediate(1));
   2926       DeoptimizeIf(not_zero, instr->environment());
   2927       __ Set(output_reg, Immediate(0));
   2928       __ jmp(&done, Label::kNear);
   2929       __ bind(&positive_sign);
   2930     }
   2931 
   2932     // Use truncating instruction (OK because input is positive).
   2933     __ cvttsd2si(output_reg, Operand(input_reg));
   2934 
   2935     // Overflow is signalled with minint.
   2936     __ cmp(output_reg, 0x80000000u);
   2937     DeoptimizeIf(equal, instr->environment());
   2938     __ bind(&done);
   2939   }
   2940 }
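
// The non-SSE4.1 path of DoMathFloor relies on cvttsd2si truncating
// toward zero, which coincides with floor() only for non-negative
// inputs; negative inputs (and NaN, which ucomisd reports as "below")
// deoptimize, as does -0 when kBailoutOnMinusZero is set. A sketch of
// the semantics being emulated (FloorToInt32 is a hypothetical helper
// assuming <cmath> and <stdint.h>; false stands for "deoptimize"):
static bool FloorToInt32(double x, int32_t* out) {
  if (!(x >= 0.0)) return false;          // Negative or NaN.
  double f = std::floor(x);               // roundsd kRoundDown on SSE4.1.
  if (f >= 2147483648.0) return false;    // cvttsd2si would yield 0x80000000.
  *out = static_cast<int32_t>(f);
  return true;
}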
   2941 
   2942 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
   2943   XMMRegister xmm_scratch = xmm0;
   2944   Register output_reg = ToRegister(instr->result());
   2945   XMMRegister input_reg = ToDoubleRegister(instr->value());
   2946 
   2947   Label below_half, done;
   2948   // xmm_scratch = 0.5
   2949   ExternalReference one_half = ExternalReference::address_of_one_half();
   2950   __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
   2951   __ ucomisd(xmm_scratch, input_reg);
   2952   __ j(above, &below_half);
   2953   // xmm_scratch = input + 0.5
   2954   __ addsd(xmm_scratch, input_reg);
   2955 
   2956   // Compute Math.floor(value + 0.5).
   2957   // Use truncating instruction (OK because input is positive).
   2958   __ cvttsd2si(output_reg, Operand(xmm_scratch));
   2959 
   2960   // Overflow is signalled with minint.
   2961   __ cmp(output_reg, 0x80000000u);
   2962   DeoptimizeIf(equal, instr->environment());
   2963   __ jmp(&done);
   2964 
   2965   __ bind(&below_half);
   2966 
2967   // We return 0 for the input range [+0, 0.5), or [-0.5, 0.5) if
2968   // we can ignore the difference between a result of -0 and +0.
   2969   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2970     // If the sign is positive, we return +0.
   2971     __ movmskpd(output_reg, input_reg);
   2972     __ test(output_reg, Immediate(1));
   2973     DeoptimizeIf(not_zero, instr->environment());
   2974   } else {
   2975     // If the input is >= -0.5, we return +0.
   2976     __ mov(output_reg, Immediate(0xBF000000));
   2977     __ movd(xmm_scratch, Operand(output_reg));
   2978     __ cvtss2sd(xmm_scratch, xmm_scratch);
   2979     __ ucomisd(input_reg, xmm_scratch);
   2980     DeoptimizeIf(below, instr->environment());
   2981   }
   2982   __ Set(output_reg, Immediate(0));
   2983   __ bind(&done);
   2984 }
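
// DoMathRound computes Math.round(x) as truncate(x + 0.5) when
// x >= 0.5 (truncation equals floor for values >= 1), and returns +0
// for the range [-0.5, 0.5); anything smaller deoptimizes. A sketch of
// that contract (RoundToInt32 is a hypothetical helper assuming
// <stdint.h>; it ignores the kBailoutOnMinusZero case, and false
// stands for "deoptimize"):
static bool RoundToInt32(double x, int32_t* out) {
  if (x >= 0.5) {
    double sum = x + 0.5;
    if (sum >= 2147483648.0) return false;  // Overflow signalled with minint.
    *out = static_cast<int32_t>(sum);       // Truncation == floor here.
    return true;
  }
  if (x >= -0.5) {                          // [-0.5, 0.5) rounds to +0.
    *out = 0;
    return true;
  }
  return false;                             // x < -0.5, or NaN.
}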
   2985 
   2986 
   2987 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
   2988   XMMRegister input_reg = ToDoubleRegister(instr->value());
   2989   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   2990   __ sqrtsd(input_reg, input_reg);
   2991 }
   2992 
   2993 
   2994 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   2995   XMMRegister xmm_scratch = xmm0;
   2996   XMMRegister input_reg = ToDoubleRegister(instr->value());
   2997   Register scratch = ToRegister(instr->temp());
   2998   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   2999 
   3000   // Note that according to ECMA-262 15.8.2.13:
   3001   // Math.pow(-Infinity, 0.5) == Infinity
   3002   // Math.sqrt(-Infinity) == NaN
   3003   Label done, sqrt;
   3004   // Check base for -Infinity.  According to IEEE-754, single-precision
   3005   // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
   3006   __ mov(scratch, 0xFF800000);
   3007   __ movd(xmm_scratch, scratch);
   3008   __ cvtss2sd(xmm_scratch, xmm_scratch);
   3009   __ ucomisd(input_reg, xmm_scratch);
   3010   // Comparing -Infinity with NaN results in "unordered", which sets the
   3011   // zero flag as if both were equal.  However, it also sets the carry flag.
   3012   __ j(not_equal, &sqrt, Label::kNear);
   3013   __ j(carry, &sqrt, Label::kNear);
   3014   // If input is -Infinity, return Infinity.
   3015   __ xorps(input_reg, input_reg);
   3016   __ subsd(input_reg, xmm_scratch);
   3017   __ jmp(&done, Label::kNear);
   3018 
   3019   // Square root.
   3020   __ bind(&sqrt);
   3021   __ xorps(xmm_scratch, xmm_scratch);
   3022   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   3023   __ sqrtsd(input_reg, input_reg);
   3024   __ bind(&done);
   3025 }
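
// Math.pow(x, 0.5) differs from a plain sqrtsd in two corner cases that
// DoMathPowHalf handles explicitly: pow(-Infinity, 0.5) is +Infinity
// (sqrt alone would produce NaN), and pow(-0, 0.5) is +0 (the addsd of
// +0 above normalizes -0 before sqrtsd). A sketch of the contract
// (PowHalf is a hypothetical helper assuming <cmath> and <limits>):
static double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // ECMA-262 15.8.2.13.
  }
  return std::sqrt(x + 0.0);  // -0 + +0 == +0, as in the codegen.
}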
   3026 
   3027 
   3028 void LCodeGen::DoPower(LPower* instr) {
   3029   Representation exponent_type = instr->hydrogen()->right()->representation();
   3030   // Having marked this as a call, we can use any registers.
   3031   // Just make sure that the input/output registers are the expected ones.
   3032   ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
   3033          ToDoubleRegister(instr->InputAt(1)).is(xmm1));
   3034   ASSERT(!instr->InputAt(1)->IsRegister() ||
   3035          ToRegister(instr->InputAt(1)).is(eax));
   3036   ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
   3037   ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
   3038 
   3039   if (exponent_type.IsTagged()) {
   3040     Label no_deopt;
   3041     __ JumpIfSmi(eax, &no_deopt);
   3042     __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
   3043     DeoptimizeIf(not_equal, instr->environment());
   3044     __ bind(&no_deopt);
   3045     MathPowStub stub(MathPowStub::TAGGED);
   3046     __ CallStub(&stub);
   3047   } else if (exponent_type.IsInteger32()) {
   3048     MathPowStub stub(MathPowStub::INTEGER);
   3049     __ CallStub(&stub);
   3050   } else {
   3051     ASSERT(exponent_type.IsDouble());
   3052     MathPowStub stub(MathPowStub::DOUBLE);
   3053     __ CallStub(&stub);
   3054   }
   3055 }
   3056 
   3057 
   3058 void LCodeGen::DoRandom(LRandom* instr) {
   3059   class DeferredDoRandom: public LDeferredCode {
   3060    public:
   3061     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
   3062         : LDeferredCode(codegen), instr_(instr) { }
   3063     virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
   3064     virtual LInstruction* instr() { return instr_; }
   3065    private:
   3066     LRandom* instr_;
   3067   };
   3068 
   3069   DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
   3070 
3071   // Having marked this instruction as a call, we can use any
3072   // registers.
   3073   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3074   ASSERT(ToRegister(instr->InputAt(0)).is(eax));
   3075   // Assert that the register size is indeed the size of each seed.
   3076   static const int kSeedSize = sizeof(uint32_t);
   3077   STATIC_ASSERT(kPointerSize == kSeedSize);
   3078 
   3079   __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
   3080   static const int kRandomSeedOffset =
   3081       FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
   3082   __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
   3083   // ebx: FixedArray of the global context's random seeds
   3084 
   3085   // Load state[0].
   3086   __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
   3087   // If state[0] == 0, call runtime to initialize seeds.
   3088   __ test(ecx, ecx);
   3089   __ j(zero, deferred->entry());
   3090   // Load state[1].
   3091   __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
   3092   // ecx: state[0]
   3093   // eax: state[1]
   3094 
   3095   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
   3096   __ movzx_w(edx, ecx);
   3097   __ imul(edx, edx, 18273);
   3098   __ shr(ecx, 16);
   3099   __ add(ecx, edx);
   3100   // Save state[0].
   3101   __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
   3102 
   3103   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
   3104   __ movzx_w(edx, eax);
   3105   __ imul(edx, edx, 36969);
   3106   __ shr(eax, 16);
   3107   __ add(eax, edx);
   3108   // Save state[1].
   3109   __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
   3110 
   3111   // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
   3112   __ shl(ecx, 14);
   3113   __ and_(eax, Immediate(0x3FFFF));
   3114   __ add(eax, ecx);
   3115 
   3116   __ bind(deferred->exit());
   3117   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   3118   // by computing:
3119   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
   3120   __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
   3121   __ movd(xmm2, ebx);
   3122   __ movd(xmm1, eax);
   3123   __ cvtss2sd(xmm2, xmm2);
   3124   __ xorps(xmm1, xmm2);
   3125   __ subsd(xmm1, xmm2);
   3126 }
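
// DoRandom is George Marsaglia's multiply-with-carry generator over two
// 32-bit seeds, followed by a bit trick that plants 32 random bits in
// the mantissa of 1.0 x 2^20 and subtracts that bias to obtain a double
// in [0, 1). A host-side sketch of the whole pipeline (MwcRandom is a
// hypothetical helper assuming <stdint.h> and <cstring>; the codegen
// uses xorps instead of OR, which is equivalent because the bit ranges
// are disjoint):
static double MwcRandom(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273 * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969 * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFF);
  // 0x4130000000000000 is 1.0 x 2^20 as a double; merging 32 random
  // bits into the low mantissa and subtracting 2^20 leaves the binary
  // fraction 0.(32 random bits).
  uint64_t pattern = 0x4130000000000000ull | random_bits;
  double result;
  memcpy(&result, &pattern, sizeof(result));
  return result - 1048576.0;  // 1048576.0 == 2^20.
}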
   3127 
   3128 
   3129 void LCodeGen::DoDeferredRandom(LRandom* instr) {
   3130   __ PrepareCallCFunction(1, ebx);
   3131   __ mov(Operand(esp, 0), eax);
   3132   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
   3133   // Return value is in eax.
   3134 }
   3135 
   3136 
   3137 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
   3138   ASSERT(instr->value()->Equals(instr->result()));
   3139   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3140   Label positive, done, zero;
   3141   __ xorps(xmm0, xmm0);
   3142   __ ucomisd(input_reg, xmm0);
   3143   __ j(above, &positive, Label::kNear);
   3144   __ j(equal, &zero, Label::kNear);
   3145   ExternalReference nan =
   3146       ExternalReference::address_of_canonical_non_hole_nan();
   3147   __ movdbl(input_reg, Operand::StaticVariable(nan));
   3148   __ jmp(&done, Label::kNear);
   3149   __ bind(&zero);
   3150   __ push(Immediate(0xFFF00000));
   3151   __ push(Immediate(0));
   3152   __ movdbl(input_reg, Operand(esp, 0));
   3153   __ add(Operand(esp), Immediate(kDoubleSize));
   3154   __ jmp(&done, Label::kNear);
   3155   __ bind(&positive);
   3156   __ fldln2();
   3157   __ sub(Operand(esp), Immediate(kDoubleSize));
   3158   __ movdbl(Operand(esp, 0), input_reg);
   3159   __ fld_d(Operand(esp, 0));
   3160   __ fyl2x();
   3161   __ fstp_d(Operand(esp, 0));
   3162   __ movdbl(input_reg, Operand(esp, 0));
   3163   __ add(Operand(esp), Immediate(kDoubleSize));
   3164   __ bind(&done);
   3165 }
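
// The x87 sequence above computes ln(x) as ln(2) * log2(x): fldln2
// pushes ln(2), and fyl2x replaces the top two stack slots with
// ST(1) * log2(ST(0)). The zero path stores the bit pattern with high
// word 0xFFF00000 and low word 0, i.e. -Infinity, and negative inputs
// get the canonical NaN, matching Math.log. A sketch of the contract
// (MathLn is a hypothetical helper assuming <cmath> and <limits>):
static double MathLn(double x) {
  if (x == 0.0) return -std::numeric_limits<double>::infinity();
  if (x < 0.0) return std::numeric_limits<double>::quiet_NaN();
  return std::log(x);  // == ln(2) * log2(x), the fldln2/fyl2x product.
}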
   3166 
   3167 
   3168 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
   3169   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3170   TranscendentalCacheStub stub(TranscendentalCache::TAN,
   3171                                TranscendentalCacheStub::UNTAGGED);
   3172   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3173 }
   3174 
   3175 
   3176 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   3177   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3178   TranscendentalCacheStub stub(TranscendentalCache::COS,
   3179                                TranscendentalCacheStub::UNTAGGED);
   3180   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3181 }
   3182 
   3183 
   3184 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
   3185   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3186   TranscendentalCacheStub stub(TranscendentalCache::SIN,
   3187                                TranscendentalCacheStub::UNTAGGED);
   3188   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3189 }
   3190 
   3191 
   3192 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
   3193   switch (instr->op()) {
   3194     case kMathAbs:
   3195       DoMathAbs(instr);
   3196       break;
   3197     case kMathFloor:
   3198       DoMathFloor(instr);
   3199       break;
   3200     case kMathRound:
   3201       DoMathRound(instr);
   3202       break;
   3203     case kMathSqrt:
   3204       DoMathSqrt(instr);
   3205       break;
   3206     case kMathCos:
   3207       DoMathCos(instr);
   3208       break;
   3209     case kMathSin:
   3210       DoMathSin(instr);
   3211       break;
   3212     case kMathTan:
   3213       DoMathTan(instr);
   3214       break;
   3215     case kMathLog:
   3216       DoMathLog(instr);
   3217       break;
   3218 
   3219     default:
   3220       UNREACHABLE();
   3221   }
   3222 }
   3223 
   3224 
   3225 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3226   ASSERT(ToRegister(instr->context()).is(esi));
   3227   ASSERT(ToRegister(instr->function()).is(edi));
   3228   ASSERT(instr->HasPointerMap());
   3229   ASSERT(instr->HasDeoptimizationEnvironment());
   3230   LPointerMap* pointers = instr->pointer_map();
   3231   RecordPosition(pointers->position());
   3232   SafepointGenerator generator(
   3233       this, pointers, Safepoint::kLazyDeopt);
   3234   ParameterCount count(instr->arity());
   3235   __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
   3236 }
   3237 
   3238 
   3239 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   3240   ASSERT(ToRegister(instr->context()).is(esi));
   3241   ASSERT(ToRegister(instr->key()).is(ecx));
   3242   ASSERT(ToRegister(instr->result()).is(eax));
   3243 
   3244   int arity = instr->arity();
   3245   Handle<Code> ic =
   3246       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   3247   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3248 }
   3249 
   3250 
   3251 void LCodeGen::DoCallNamed(LCallNamed* instr) {
   3252   ASSERT(ToRegister(instr->context()).is(esi));
   3253   ASSERT(ToRegister(instr->result()).is(eax));
   3254 
   3255   int arity = instr->arity();
   3256   RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
   3257   Handle<Code> ic =
   3258       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   3259   __ mov(ecx, instr->name());
   3260   CallCode(ic, mode, instr);
   3261 }
   3262 
   3263 
   3264 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3265   ASSERT(ToRegister(instr->context()).is(esi));
   3266   ASSERT(ToRegister(instr->function()).is(edi));
   3267   ASSERT(ToRegister(instr->result()).is(eax));
   3268 
   3269   int arity = instr->arity();
   3270   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   3271   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3272 }
   3273 
   3274 
   3275 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
   3276   ASSERT(ToRegister(instr->context()).is(esi));
   3277   ASSERT(ToRegister(instr->result()).is(eax));
   3278 
   3279   int arity = instr->arity();
   3280   RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
   3281   Handle<Code> ic =
   3282       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   3283   __ mov(ecx, instr->name());
   3284   CallCode(ic, mode, instr);
   3285 }
   3286 
   3287 
   3288 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   3289   ASSERT(ToRegister(instr->result()).is(eax));
   3290   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
   3291 }
   3292 
   3293 
   3294 void LCodeGen::DoCallNew(LCallNew* instr) {
   3295   ASSERT(ToRegister(instr->context()).is(esi));
   3296   ASSERT(ToRegister(instr->constructor()).is(edi));
   3297   ASSERT(ToRegister(instr->result()).is(eax));
   3298 
   3299   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   3300   __ Set(eax, Immediate(instr->arity()));
   3301   CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   3302 }
   3303 
   3304 
   3305 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3306   CallRuntime(instr->function(), instr->arity(), instr);
   3307 }
   3308 
   3309 
   3310 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3311   Register object = ToRegister(instr->object());
   3312   Register value = ToRegister(instr->value());
   3313   int offset = instr->offset();
   3314 
   3315   if (!instr->transition().is_null()) {
   3316     __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
   3317   }
   3318 
   3319   // Do the store.
   3320   HType type = instr->hydrogen()->value()->type();
   3321   SmiCheck check_needed =
   3322       type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   3323   if (instr->is_in_object()) {
   3324     __ mov(FieldOperand(object, offset), value);
   3325     if (instr->hydrogen()->NeedsWriteBarrier()) {
   3326       Register temp = ToRegister(instr->TempAt(0));
   3327       // Update the write barrier for the object for in-object properties.
   3328       __ RecordWriteField(object,
   3329                           offset,
   3330                           value,
   3331                           temp,
   3332                           kSaveFPRegs,
   3333                           EMIT_REMEMBERED_SET,
   3334                           check_needed);
   3335     }
   3336   } else {
   3337     Register temp = ToRegister(instr->TempAt(0));
   3338     __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
   3339     __ mov(FieldOperand(temp, offset), value);
   3340     if (instr->hydrogen()->NeedsWriteBarrier()) {
   3341       // Update the write barrier for the properties array.
   3342       // object is used as a scratch register.
   3343       __ RecordWriteField(temp,
   3344                           offset,
   3345                           value,
   3346                           object,
   3347                           kSaveFPRegs,
   3348                           EMIT_REMEMBERED_SET,
   3349                           check_needed);
   3350     }
   3351   }
   3352 }
   3353 
   3354 
   3355 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   3356   ASSERT(ToRegister(instr->context()).is(esi));
   3357   ASSERT(ToRegister(instr->object()).is(edx));
   3358   ASSERT(ToRegister(instr->value()).is(eax));
   3359 
   3360   __ mov(ecx, instr->name());
   3361   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   3362       ? isolate()->builtins()->StoreIC_Initialize_Strict()
   3363       : isolate()->builtins()->StoreIC_Initialize();
   3364   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3365 }
   3366 
   3367 
   3368 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   3369   if (instr->index()->IsConstantOperand()) {
   3370     __ cmp(ToOperand(instr->length()),
   3371            Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
   3372     DeoptimizeIf(below_equal, instr->environment());
   3373   } else {
   3374     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
   3375     DeoptimizeIf(above_equal, instr->environment());
   3376   }
   3377 }
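
// The register case of DoBoundsCheck needs only one branch because
// 'above_equal' is an unsigned comparison: a negative index reads as a
// huge unsigned value and therefore also fails the check. The same
// trick in C++ (InBounds is a hypothetical helper assuming <stdint.h>;
// length is assumed non-negative):
static bool InBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}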
   3378 
   3379 
   3380 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
   3381     LStoreKeyedSpecializedArrayElement* instr) {
   3382   ElementsKind elements_kind = instr->elements_kind();
   3383   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
   3384                                         instr->key(), elements_kind, 0));
   3385   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   3386     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
   3387     __ movss(operand, xmm0);
   3388   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   3389     __ movdbl(operand, ToDoubleRegister(instr->value()));
   3390   } else {
   3391     Register value = ToRegister(instr->value());
   3392     switch (elements_kind) {
   3393       case EXTERNAL_PIXEL_ELEMENTS:
   3394       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   3395       case EXTERNAL_BYTE_ELEMENTS:
   3396         __ mov_b(operand, value);
   3397         break;
   3398       case EXTERNAL_SHORT_ELEMENTS:
   3399       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   3400         __ mov_w(operand, value);
   3401         break;
   3402       case EXTERNAL_INT_ELEMENTS:
   3403       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   3404         __ mov(operand, value);
   3405         break;
   3406       case EXTERNAL_FLOAT_ELEMENTS:
   3407       case EXTERNAL_DOUBLE_ELEMENTS:
   3408       case FAST_SMI_ONLY_ELEMENTS:
   3409       case FAST_ELEMENTS:
   3410       case FAST_DOUBLE_ELEMENTS:
   3411       case DICTIONARY_ELEMENTS:
   3412       case NON_STRICT_ARGUMENTS_ELEMENTS:
   3413         UNREACHABLE();
   3414         break;
   3415     }
   3416   }
   3417 }
   3418 
   3419 
   3420 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
   3421   Register value = ToRegister(instr->value());
   3422   Register elements = ToRegister(instr->object());
   3423   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   3424 
   3425   // Do the store.
   3426   if (instr->key()->IsConstantOperand()) {
   3427     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   3428     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3429     int offset =
   3430         ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
   3431     __ mov(FieldOperand(elements, offset), value);
   3432   } else {
   3433     __ mov(FieldOperand(elements,
   3434                         key,
   3435                         times_pointer_size,
   3436                         FixedArray::kHeaderSize),
   3437            value);
   3438   }
   3439 
   3440   if (instr->hydrogen()->NeedsWriteBarrier()) {
   3441     HType type = instr->hydrogen()->value()->type();
   3442     SmiCheck check_needed =
   3443         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   3444     // Compute address of modified element and store it into key register.
   3445     __ lea(key,
   3446            FieldOperand(elements,
   3447                         key,
   3448                         times_pointer_size,
   3449                         FixedArray::kHeaderSize));
   3450     __ RecordWrite(elements,
   3451                    key,
   3452                    value,
   3453                    kSaveFPRegs,
   3454                    EMIT_REMEMBERED_SET,
   3455                    check_needed);
   3456   }
   3457 }
   3458 
   3459 
   3460 void LCodeGen::DoStoreKeyedFastDoubleElement(
   3461     LStoreKeyedFastDoubleElement* instr) {
   3462   XMMRegister value = ToDoubleRegister(instr->value());
   3463   Label have_value;
   3464 
   3465   __ ucomisd(value, value);
3466   __ j(parity_odd, &have_value);  // Not NaN: parity is set only if unordered.
   3467 
   3468   ExternalReference canonical_nan_reference =
   3469       ExternalReference::address_of_canonical_non_hole_nan();
   3470   __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
   3471   __ bind(&have_value);
   3472 
   3473   Operand double_store_operand = BuildFastArrayOperand(
   3474       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
   3475       FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   3476   __ movdbl(double_store_operand, value);
   3477 }
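
// ucomisd(value, value) is unordered exactly when value is NaN, which
// is what the parity-flag branch above tests; every NaN payload is then
// replaced by the single canonical non-hole NaN before the store, so
// arbitrary NaN bit patterns (in particular the hole sentinel) never
// reach a fast double array. The same test in portable form (IsNaNValue
// is a hypothetical helper):
static bool IsNaNValue(double value) {
  return value != value;  // Only NaN compares unordered with itself.
}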
   3478 
   3479 
   3480 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   3481   ASSERT(ToRegister(instr->context()).is(esi));
   3482   ASSERT(ToRegister(instr->object()).is(edx));
   3483   ASSERT(ToRegister(instr->key()).is(ecx));
   3484   ASSERT(ToRegister(instr->value()).is(eax));
   3485 
   3486   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   3487       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
   3488       : isolate()->builtins()->KeyedStoreIC_Initialize();
   3489   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3490 }
   3491 
   3492 
   3493 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   3494   Register object_reg = ToRegister(instr->object());
   3495   Register new_map_reg = ToRegister(instr->new_map_reg());
   3496 
   3497   Handle<Map> from_map = instr->original_map();
   3498   Handle<Map> to_map = instr->transitioned_map();
   3499   ElementsKind from_kind = from_map->elements_kind();
   3500   ElementsKind to_kind = to_map->elements_kind();
   3501 
   3502   Label not_applicable;
   3503   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   3504   __ j(not_equal, &not_applicable);
   3505   __ mov(new_map_reg, to_map);
   3506   if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
   3507     Register object_reg = ToRegister(instr->object());
   3508     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
   3509     // Write barrier.
   3510     ASSERT_NE(instr->temp_reg(), NULL);
   3511     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
   3512                         ToRegister(instr->temp_reg()), kDontSaveFPRegs);
   3513   } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
   3514       to_kind == FAST_DOUBLE_ELEMENTS) {
   3515     Register fixed_object_reg = ToRegister(instr->temp_reg());
   3516     ASSERT(fixed_object_reg.is(edx));
   3517     ASSERT(new_map_reg.is(ebx));
   3518     __ mov(fixed_object_reg, object_reg);
   3519     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
   3520              RelocInfo::CODE_TARGET, instr);
   3521   } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
   3522     Register fixed_object_reg = ToRegister(instr->temp_reg());
   3523     ASSERT(fixed_object_reg.is(edx));
   3524     ASSERT(new_map_reg.is(ebx));
   3525     __ mov(fixed_object_reg, object_reg);
   3526     CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
   3527              RelocInfo::CODE_TARGET, instr);
   3528   } else {
   3529     UNREACHABLE();
   3530   }
   3531   __ bind(&not_applicable);
   3532 }
   3533 
   3534 
   3535 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   3536   class DeferredStringCharCodeAt: public LDeferredCode {
   3537    public:
   3538     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   3539         : LDeferredCode(codegen), instr_(instr) { }
   3540     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   3541     virtual LInstruction* instr() { return instr_; }
   3542    private:
   3543     LStringCharCodeAt* instr_;
   3544   };
   3545 
   3546   DeferredStringCharCodeAt* deferred =
   3547       new DeferredStringCharCodeAt(this, instr);
   3548 
   3549   StringCharLoadGenerator::Generate(masm(),
   3550                                     factory(),
   3551                                     ToRegister(instr->string()),
   3552                                     ToRegister(instr->index()),
   3553                                     ToRegister(instr->result()),
   3554                                     deferred->entry());
   3555   __ bind(deferred->exit());
   3556 }
   3557 
   3558 
   3559 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   3560   Register string = ToRegister(instr->string());
   3561   Register result = ToRegister(instr->result());
   3562 
   3563   // TODO(3095996): Get rid of this. For now, we need to make the
   3564   // result register contain a valid pointer because it is already
   3565   // contained in the register pointer map.
   3566   __ Set(result, Immediate(0));
   3567 
   3568   PushSafepointRegistersScope scope(this);
   3569   __ push(string);
   3570   // Push the index as a smi. This is safe because of the checks in
   3571   // DoStringCharCodeAt above.
   3572   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
   3573   if (instr->index()->IsConstantOperand()) {
   3574     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3575     __ push(Immediate(Smi::FromInt(const_index)));
   3576   } else {
   3577     Register index = ToRegister(instr->index());
   3578     __ SmiTag(index);
   3579     __ push(index);
   3580   }
   3581   CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
   3582                           instr, instr->context());
   3583   if (FLAG_debug_code) {
   3584     __ AbortIfNotSmi(eax);
   3585   }
   3586   __ SmiUntag(eax);
   3587   __ StoreToSafepointRegisterSlot(result, eax);
   3588 }
   3589 
   3590 
   3591 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   3592   class DeferredStringCharFromCode: public LDeferredCode {
   3593    public:
   3594     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   3595         : LDeferredCode(codegen), instr_(instr) { }
   3596     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   3597     virtual LInstruction* instr() { return instr_; }
   3598    private:
   3599     LStringCharFromCode* instr_;
   3600   };
   3601 
   3602   DeferredStringCharFromCode* deferred =
   3603       new DeferredStringCharFromCode(this, instr);
   3604 
   3605   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   3606   Register char_code = ToRegister(instr->char_code());
   3607   Register result = ToRegister(instr->result());
   3608   ASSERT(!char_code.is(result));
   3609 
   3610   __ cmp(char_code, String::kMaxAsciiCharCode);
   3611   __ j(above, deferred->entry());
   3612   __ Set(result, Immediate(factory()->single_character_string_cache()));
   3613   __ mov(result, FieldOperand(result,
   3614                               char_code, times_pointer_size,
   3615                               FixedArray::kHeaderSize));
   3616   __ cmp(result, factory()->undefined_value());
   3617   __ j(equal, deferred->entry());
   3618   __ bind(deferred->exit());
   3619 }
   3620 
   3621 
   3622 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   3623   Register char_code = ToRegister(instr->char_code());
   3624   Register result = ToRegister(instr->result());
   3625 
   3626   // TODO(3095996): Get rid of this. For now, we need to make the
   3627   // result register contain a valid pointer because it is already
   3628   // contained in the register pointer map.
   3629   __ Set(result, Immediate(0));
   3630 
   3631   PushSafepointRegistersScope scope(this);
   3632   __ SmiTag(char_code);
   3633   __ push(char_code);
   3634   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   3635   __ StoreToSafepointRegisterSlot(result, eax);
   3636 }
   3637 
   3638 
   3639 void LCodeGen::DoStringLength(LStringLength* instr) {
   3640   Register string = ToRegister(instr->string());
   3641   Register result = ToRegister(instr->result());
   3642   __ mov(result, FieldOperand(string, String::kLengthOffset));
   3643 }
   3644 
   3645 
   3646 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   3647   EmitPushTaggedOperand(instr->left());
   3648   EmitPushTaggedOperand(instr->right());
   3649   StringAddStub stub(NO_STRING_CHECK_IN_STUB);
   3650   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3651 }
   3652 
   3653 
   3654 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   3655   LOperand* input = instr->InputAt(0);
   3656   ASSERT(input->IsRegister() || input->IsStackSlot());
   3657   LOperand* output = instr->result();
   3658   ASSERT(output->IsDoubleRegister());
   3659   __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
   3660 }
   3661 
   3662 
   3663 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   3664   class DeferredNumberTagI: public LDeferredCode {
   3665    public:
   3666     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   3667         : LDeferredCode(codegen), instr_(instr) { }
   3668     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
   3669     virtual LInstruction* instr() { return instr_; }
   3670    private:
   3671     LNumberTagI* instr_;
   3672   };
   3673 
   3674   LOperand* input = instr->InputAt(0);
   3675   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   3676   Register reg = ToRegister(input);
   3677 
   3678   DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
   3679   __ SmiTag(reg);
   3680   __ j(overflow, deferred->entry());
   3681   __ bind(deferred->exit());
   3682 }
   3683 
   3684 
   3685 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
   3686   Label slow;
   3687   Register reg = ToRegister(instr->InputAt(0));
   3688   Register tmp = reg.is(eax) ? ecx : eax;
   3689 
   3690   // Preserve the value of all registers.
   3691   PushSafepointRegistersScope scope(this);
   3692 
   3693   // There was overflow, so bits 30 and 31 of the original integer
   3694   // disagree. Try to allocate a heap number in new space and store
   3695   // the value in there. If that fails, call the runtime system.
   3696   Label done;
   3697   __ SmiUntag(reg);
   3698   __ xor_(reg, 0x80000000);
   3699   __ cvtsi2sd(xmm0, Operand(reg));
   3700   if (FLAG_inline_new) {
   3701     __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
   3702     __ jmp(&done, Label::kNear);
   3703   }
   3704 
   3705   // Slow case: Call the runtime system to do the number allocation.
   3706   __ bind(&slow);
   3707 
   3708   // TODO(3095996): Put a valid pointer value in the stack slot where the result
   3709   // register is stored, as this register is in the pointer map, but contains an
   3710   // integer value.
   3711   __ StoreToSafepointRegisterSlot(reg, Immediate(0));
   3712   // NumberTagI and NumberTagD use the context from the frame, rather than
   3713   // the environment's HContext or HInlinedContext value.
   3714   // They only call Runtime::kAllocateHeapNumber.
   3715   // The corresponding HChange instructions are added in a phase that does
   3716   // not have easy access to the local context.
   3717   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   3718   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   3719   RecordSafepointWithRegisters(
   3720       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   3721   if (!reg.is(eax)) __ mov(reg, eax);
   3722 
   3723   // Done. Put the value in xmm0 into the value of the allocated heap
   3724   // number.
   3725   __ bind(&done);
   3726   __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
   3727   __ StoreToSafepointRegisterSlot(reg, reg);
   3728 }
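
// How the deferred code recovers the integer after SmiTag overflowed:
// the shl by 1 lost bit 31, SmiUntag (sar by 1) then leaves a value
// whose bit 31 is a copy of bit 30, and since overflow means bits 30
// and 31 of the original disagreed, xor-ing with 0x80000000 flips
// bit 31 back to the original. A sketch (hypothetical helper assuming
// <stdint.h>; '>>' on a negative int is assumed to be an arithmetic
// shift, like sar):
static int32_t RecoverFromSmiTagOverflow(int32_t tagged) {
  int32_t untagged = tagged >> 1;  // SmiUntag.
  return untagged ^ static_cast<int32_t>(0x80000000);  // Flip bit 31.
}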
   3729 
   3730 
   3731 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   3732   class DeferredNumberTagD: public LDeferredCode {
   3733    public:
   3734     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   3735         : LDeferredCode(codegen), instr_(instr) { }
   3736     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   3737     virtual LInstruction* instr() { return instr_; }
   3738    private:
   3739     LNumberTagD* instr_;
   3740   };
   3741 
   3742   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   3743   Register reg = ToRegister(instr->result());
   3744   Register tmp = ToRegister(instr->TempAt(0));
   3745 
   3746   DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
   3747   if (FLAG_inline_new) {
   3748     __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
   3749   } else {
   3750     __ jmp(deferred->entry());
   3751   }
   3752   __ bind(deferred->exit());
   3753   __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   3754 }
   3755 
   3756 
   3757 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   3758   // TODO(3095996): Get rid of this. For now, we need to make the
   3759   // result register contain a valid pointer because it is already
   3760   // contained in the register pointer map.
   3761   Register reg = ToRegister(instr->result());
   3762   __ Set(reg, Immediate(0));
   3763 
   3764   PushSafepointRegistersScope scope(this);
   3765   // NumberTagI and NumberTagD use the context from the frame, rather than
   3766   // the environment's HContext or HInlinedContext value.
   3767   // They only call Runtime::kAllocateHeapNumber.
   3768   // The corresponding HChange instructions are added in a phase that does
   3769   // not have easy access to the local context.
   3770   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   3771   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   3772   RecordSafepointWithRegisters(
   3773       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   3774   __ StoreToSafepointRegisterSlot(reg, eax);
   3775 }
   3776 
   3777 
   3778 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   3779   LOperand* input = instr->InputAt(0);
   3780   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   3781   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
   3782   __ SmiTag(ToRegister(input));
   3783 }
   3784 
   3785 
   3786 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   3787   LOperand* input = instr->InputAt(0);
   3788   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   3789   if (instr->needs_check()) {
   3790     __ test(ToRegister(input), Immediate(kSmiTagMask));
   3791     DeoptimizeIf(not_zero, instr->environment());
   3792   }
   3793   __ SmiUntag(ToRegister(input));
   3794 }
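
// On ia32 a smi is the 31-bit integer value shifted left by one, with
// the low (tag) bit clear; kSmiTagMask selects exactly that bit, and
// untagging is an arithmetic shift right. A sketch of the
// representation (hypothetical helpers assuming <stdint.h>; '>>' on a
// negative int is assumed to be an arithmetic shift):
static bool IsSmi(uint32_t tagged) {
  return (tagged & 1) == 0;  // kSmiTagMask == 1.
}
static int32_t SmiToInt(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;  // Like the SmiUntag sar.
}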
   3795 
   3796 
   3797 void LCodeGen::EmitNumberUntagD(Register input_reg,
   3798                                 Register temp_reg,
   3799                                 XMMRegister result_reg,
   3800                                 bool deoptimize_on_undefined,
   3801                                 bool deoptimize_on_minus_zero,
   3802                                 LEnvironment* env) {
   3803   Label load_smi, done;
   3804 
   3805   // Smi check.
   3806   __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
   3807 
   3808   // Heap number map check.
   3809   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   3810          factory()->heap_number_map());
   3811   if (deoptimize_on_undefined) {
   3812     DeoptimizeIf(not_equal, env);
   3813   } else {
   3814     Label heap_number;
   3815     __ j(equal, &heap_number, Label::kNear);
   3816 
   3817     __ cmp(input_reg, factory()->undefined_value());
   3818     DeoptimizeIf(not_equal, env);
   3819 
   3820     // Convert undefined to NaN.
   3821     ExternalReference nan =
   3822         ExternalReference::address_of_canonical_non_hole_nan();
   3823     __ movdbl(result_reg, Operand::StaticVariable(nan));
   3824     __ jmp(&done, Label::kNear);
   3825 
   3826     __ bind(&heap_number);
   3827   }
   3828   // Heap number to XMM conversion.
   3829   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
   3830   if (deoptimize_on_minus_zero) {
   3831     XMMRegister xmm_scratch = xmm0;
   3832     __ xorps(xmm_scratch, xmm_scratch);
   3833     __ ucomisd(result_reg, xmm_scratch);
   3834     __ j(not_zero, &done, Label::kNear);
   3835     __ movmskpd(temp_reg, result_reg);
   3836     __ test_b(temp_reg, 1);
   3837     DeoptimizeIf(not_zero, env);
   3838   }
   3839   __ jmp(&done, Label::kNear);
   3840 
   3841   // Smi to XMM conversion
   3842   __ bind(&load_smi);
   3843   __ SmiUntag(input_reg);  // Untag smi before converting to float.
   3844   __ cvtsi2sd(result_reg, Operand(input_reg));
   3845   __ SmiTag(input_reg);  // Retag smi.
   3846   __ bind(&done);
   3847 }
   3848 
   3849 
   3850 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   3851   Label done, heap_number;
   3852   Register input_reg = ToRegister(instr->InputAt(0));
   3853 
   3854   // Heap number map check.
   3855   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   3856          factory()->heap_number_map());
   3857 
   3858   if (instr->truncating()) {
   3859     __ j(equal, &heap_number, Label::kNear);
   3860     // Check for undefined. Undefined is converted to zero for truncating
   3861     // conversions.
   3862     __ cmp(input_reg, factory()->undefined_value());
   3863     DeoptimizeIf(not_equal, instr->environment());
   3864     __ mov(input_reg, 0);
   3865     __ jmp(&done, Label::kNear);
   3866 
   3867     __ bind(&heap_number);
   3868     if (CpuFeatures::IsSupported(SSE3)) {
   3869       CpuFeatures::Scope scope(SSE3);
   3870       Label convert;
   3871       // Use more powerful conversion when sse3 is available.
   3872       // Load x87 register with heap number.
   3873       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
   3874       // Get exponent alone and check for too-big exponent.
   3875       __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   3876       __ and_(input_reg, HeapNumber::kExponentMask);
   3877       const uint32_t kTooBigExponent =
   3878           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
   3879       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
   3880       __ j(less, &convert, Label::kNear);
   3881       // Pop FPU stack before deoptimizing.
   3882       __ fstp(0);
   3883       DeoptimizeIf(no_condition, instr->environment());
   3884 
   3885       // Reserve space for 64 bit answer.
   3886       __ bind(&convert);
   3887       __ sub(Operand(esp), Immediate(kDoubleSize));
   3888       // Do conversion, which cannot fail because we checked the exponent.
   3889       __ fisttp_d(Operand(esp, 0));
   3890       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
   3891       __ add(Operand(esp), Immediate(kDoubleSize));
   3892     } else {
   3893       XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
   3894       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   3895       __ cvttsd2si(input_reg, Operand(xmm0));
   3896       __ cmp(input_reg, 0x80000000u);
   3897       __ j(not_equal, &done);
3898       // Check if the input was 0x80000000 (kMinInt).
3899       // If not, the conversion overflowed and we deoptimize.
   3900       ExternalReference min_int = ExternalReference::address_of_min_int();
   3901       __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
   3902       __ ucomisd(xmm_temp, xmm0);
   3903       DeoptimizeIf(not_equal, instr->environment());
   3904       DeoptimizeIf(parity_even, instr->environment());  // NaN.
   3905     }
   3906   } else {
   3907     // Deoptimize if we don't have a heap number.
   3908     DeoptimizeIf(not_equal, instr->environment());
   3909 
   3910     XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
   3911     __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   3912     __ cvttsd2si(input_reg, Operand(xmm0));
   3913     __ cvtsi2sd(xmm_temp, Operand(input_reg));
   3914     __ ucomisd(xmm0, xmm_temp);
   3915     DeoptimizeIf(not_equal, instr->environment());
   3916     DeoptimizeIf(parity_even, instr->environment());  // NaN.
   3917     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3918       __ test(input_reg, Operand(input_reg));
   3919       __ j(not_zero, &done);
   3920       __ movmskpd(input_reg, xmm0);
   3921       __ and_(input_reg, 1);
   3922       DeoptimizeIf(not_zero, instr->environment());
   3923     }
   3924   }
   3925   __ bind(&done);
   3926 }
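
// The truncating SSE3 path implements the ECMAScript ToInt32 cut: the
// double is truncated toward zero to 64 bits (fisttp_d, safe because
// the exponent was checked to be below bias + 63) and only the low
// 32 bits are kept. A sketch of that path (TruncateToInt32 is a
// hypothetical helper assuming <cmath> and <stdint.h>; false stands for
// the deopt taken on too-large exponents and NaN):
static bool TruncateToInt32(double x, int32_t* out) {
  if (!(std::fabs(x) < 9223372036854775808.0)) return false;  // >= 2^63, NaN.
  int64_t wide = static_cast<int64_t>(x);                     // fisttp_d.
  *out = static_cast<int32_t>(static_cast<uint32_t>(wide));   // Low word.
  return true;
}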
   3927 
   3928 
   3929 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   3930   class DeferredTaggedToI: public LDeferredCode {
   3931    public:
   3932     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   3933         : LDeferredCode(codegen), instr_(instr) { }
   3934     virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
   3935     virtual LInstruction* instr() { return instr_; }
   3936    private:
   3937     LTaggedToI* instr_;
   3938   };
   3939 
   3940   LOperand* input = instr->InputAt(0);
   3941   ASSERT(input->IsRegister());
   3942   ASSERT(input->Equals(instr->result()));
   3943 
   3944   Register input_reg = ToRegister(input);
   3945 
   3946   DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
   3947 
   3948   // Smi check.
   3949   __ JumpIfNotSmi(input_reg, deferred->entry());
   3950 
   3951   // Smi to int32 conversion
   3952   __ SmiUntag(input_reg);  // Untag smi.
   3953 
   3954   __ bind(deferred->exit());
   3955 }
   3956 
   3957 
   3958 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   3959   LOperand* input = instr->InputAt(0);
   3960   ASSERT(input->IsRegister());
   3961   LOperand* temp = instr->TempAt(0);
   3962   ASSERT(temp == NULL || temp->IsRegister());
   3963   LOperand* result = instr->result();
   3964   ASSERT(result->IsDoubleRegister());
   3965 
   3966   Register input_reg = ToRegister(input);
   3967   XMMRegister result_reg = ToDoubleRegister(result);
   3968 
   3969   bool deoptimize_on_minus_zero =
   3970       instr->hydrogen()->deoptimize_on_minus_zero();
   3971   Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
   3972 
   3973   EmitNumberUntagD(input_reg,
   3974                    temp_reg,
   3975                    result_reg,
   3976                    instr->hydrogen()->deoptimize_on_undefined(),
   3977                    deoptimize_on_minus_zero,
   3978                    instr->environment());
   3979 }
   3980 
   3981 
   3982 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   3983   LOperand* input = instr->InputAt(0);
   3984   ASSERT(input->IsDoubleRegister());
   3985   LOperand* result = instr->result();
   3986   ASSERT(result->IsRegister());
   3987 
   3988   XMMRegister input_reg = ToDoubleRegister(input);
   3989   Register result_reg = ToRegister(result);
   3990 
   3991   if (instr->truncating()) {
   3992     // Performs a truncating conversion of a floating point number as used by
   3993     // the JS bitwise operations.
   3994     __ cvttsd2si(result_reg, Operand(input_reg));
   3995     __ cmp(result_reg, 0x80000000u);
   3996     if (CpuFeatures::IsSupported(SSE3)) {
3997       // This will deoptimize if the exponent of the input is out of range.
   3998       CpuFeatures::Scope scope(SSE3);
   3999       Label convert, done;
   4000       __ j(not_equal, &done, Label::kNear);
   4001       __ sub(Operand(esp), Immediate(kDoubleSize));
   4002       __ movdbl(Operand(esp, 0), input_reg);
   4003       // Get exponent alone and check for too-big exponent.
   4004       __ mov(result_reg, Operand(esp, sizeof(int32_t)));
   4005       __ and_(result_reg, HeapNumber::kExponentMask);
   4006       const uint32_t kTooBigExponent =
   4007           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
   4008       __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
   4009       __ j(less, &convert, Label::kNear);
   4010       __ add(Operand(esp), Immediate(kDoubleSize));
   4011       DeoptimizeIf(no_condition, instr->environment());
   4012       __ bind(&convert);
   4013       // Do conversion, which cannot fail because we checked the exponent.
   4014       __ fld_d(Operand(esp, 0));
   4015       __ fisttp_d(Operand(esp, 0));
   4016       __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
   4017       __ add(Operand(esp), Immediate(kDoubleSize));
   4018       __ bind(&done);
   4019     } else {
   4020       Label done;
   4021       Register temp_reg = ToRegister(instr->TempAt(0));
   4022       XMMRegister xmm_scratch = xmm0;
   4023 
   4024       // If cvttsd2si succeeded, we're done. Otherwise, we attempt
   4025       // manual conversion.
   4026       __ j(not_equal, &done, Label::kNear);
   4027 
   4028       // Get high 32 bits of the input in result_reg and temp_reg.
   4029       __ pshufd(xmm_scratch, input_reg, 1);
   4030       __ movd(Operand(temp_reg), xmm_scratch);
   4031       __ mov(result_reg, temp_reg);
   4032 
   4033       // Prepare negation mask in temp_reg.
   4034       __ sar(temp_reg, kBitsPerInt - 1);

      // Extract the exponent from result_reg and subtract an adjusted bias
      // from it. The adjustment is chosen so that a zero difference means the
      // answer is already in the low 32 bits of the input; otherwise a shift
      // has to be performed.
      __ shr(result_reg, HeapNumber::kExponentShift);
      __ and_(result_reg,
              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
      __ sub(Operand(result_reg),
             Immediate(HeapNumber::kExponentBias +
                       HeapNumber::kExponentBits +
                       HeapNumber::kMantissaBits));
      // Don't handle big exponents (> kMantissaBits + kExponentBits == 63) or
      // special exponents; deoptimize instead.
      DeoptimizeIf(greater, instr->environment());

      // Zero out the sign and the exponent in the input (by shifting
      // it to the left) and restore the implicit mantissa bit,
      // i.e. convert the input to unsigned int64 shifted left by
      // kExponentBits.
      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
      // Minus zero has the most significant bit set and the other
      // bits cleared.
      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
      __ psllq(input_reg, HeapNumber::kExponentBits);
      __ por(input_reg, xmm_scratch);

      // Get the amount to shift the input right in xmm_scratch.
      __ neg(result_reg);
      __ movd(xmm_scratch, Operand(result_reg));

      // Shift the input right and extract low 32 bits.
      __ psrlq(input_reg, xmm_scratch);
      __ movd(Operand(result_reg), input_reg);

      // Use the prepared mask in temp_reg to negate the result if necessary.
      __ xor_(result_reg, Operand(temp_reg));
      __ sub(result_reg, Operand(temp_reg));
      __ bind(&done);
    }
  } else {
    Label done;
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cvtsi2sd(xmm0, Operand(result_reg));
    __ ucomisd(xmm0, input_reg);
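    // ucomisd sets the parity flag when the comparison is unordered, i.e. when
    // the input is NaN.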
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // The integer converted back is equal to the original, so we
      // only have to test whether the input was -0.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}


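// A smi has a zero low-order tag bit, so testing against kSmiTagMask sets the
// zero flag exactly for smis.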
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit the check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

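    // A power-of-two mask isolates a single bit: with a zero tag that bit must
    // be clear, and with a (power-of-two) non-zero tag it must be set, so a
    // single test_b suffices.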
    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
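    // The target may be moved by the GC while it is in new space, so it cannot
    // be embedded in the code directly; compare against a global property cell
    // holding it instead.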
    Register reg = ToRegister(instr->value());
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ cmp(reg, Operand::Cell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, target);
  }
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMapCommon(Register reg,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
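  // CompareMap branches to &success on a match; depending on the mode this
  // also accepts maps reachable through an allowed elements-kind transition.
  // Falling through means no match.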
  Label success;
  __ CompareMap(reg, map, &success, mode);
  DeoptimizeIf(not_equal, env);
  __ bind(&success);
}


void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  Handle<Map> map = instr->hydrogen()->map();
  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  __ LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());

    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    __ LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}


void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
  class DeferredAllocateObject: public LDeferredCode {
   public:
    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocateObject* instr_;
  };

  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();
  ASSERT(initial_map->pre_allocated_property_fields() +
         initial_map->unused_property_fields() -
         initial_map->inobject_properties() == 0);

  // Allocate memory for the object. The initial map might change when
  // the constructor's prototype changes, but instance size and property
  // counts remain unchanged (if slack tracking finished).
  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  __ AllocateInNewSpace(instance_size,
                        result,
                        no_reg,
                        scratch,
                        deferred->entry(),
                        TAG_OBJECT);

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(scratch, constructor);
  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));

  if (FLAG_debug_code) {
    __ AbortIfSmi(map);
    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
            instance_size >> kPointerSizeLog2);
    __ Assert(equal, "Unexpected instance size");
    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
            initial_map->pre_allocated_property_fields());
    __ Assert(equal, "Unexpected pre-allocated property fields count");
    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
            initial_map->unused_property_fields());
    __ Assert(equal, "Unexpected unused property fields count");
    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
            initial_map->inobject_properties());
    __ Assert(equal, "Unexpected in-object property fields count");
  }

  // Initialize map and fields of the newly allocated object.
  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
  __ mov(scratch, factory()->empty_fixed_array());
  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
  if (initial_map->inobject_properties() != 0) {
    __ mov(scratch, factory()->undefined_value());
    for (int i = 0; i < initial_map->inobject_properties(); i++) {
      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
      __ mov(FieldOperand(result, property_offset), scratch);
    }
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ PushHeapObject(constructor);
  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Heap* heap = isolate()->heap();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();

  // Deopt if the array literal boilerplate ElementsKind is of a type different
  // from the expected one. The check isn't necessary if the boilerplate has
  // already been converted to FAST_ELEMENTS.
  if (boilerplate_elements_kind != FAST_ELEMENTS) {
    __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
    // Load the map's "bit field 2". We only need the first byte,
    // but the following masking takes care of that anyway.
    __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
    // Retrieve elements_kind from bit field 2.
    __ and_(ebx, Map::kElementsKindMask);
    __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  // Boilerplate already exists; constant elements are never accessed.
  // Pass an empty fixed array.
  __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
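  // Copies the boilerplate object graph into pre-allocated space starting at
  // result + *offset, recursing into nested objects and elements backing
  // stores, and advances *offset past everything written.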
  ASSERT(!source.is(ecx));
  ASSERT(!result.is(ecx));

  if (FLAG_debug_code) {
    __ LoadHeapObject(ecx, object);
    __ cmp(source, ecx);
    __ Assert(equal, "Unexpected object literal boilerplate");
  }

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after
  // this object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;

  // Copy object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ lea(ecx, Operand(result, elements_offset));
    } else {
      __ mov(ecx, FieldOperand(source, i));
    }
    __ mov(FieldOperand(result, object_offset + i), ecx);
  }

  // Copy in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ lea(ecx, Operand(result, *offset));
      __ mov(FieldOperand(result, total_offset), ecx);
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
      __ mov(FieldOperand(result, total_offset), ecx);
    } else {
      __ mov(FieldOperand(result, total_offset), Immediate(value));
    }
  }

  if (has_elements) {
    // Copy elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ mov(ecx, FieldOperand(source, i));
      __ mov(FieldOperand(result, elements_offset + i), ecx);
    }

    // Copy elements backing store content.
    int elements_length = elements->length();
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        int32_t value_low = value & 0xFFFFFFFF;
        int32_t value_high = value >> 32;
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ mov(FieldOperand(result, total_offset), Immediate(value_low));
        __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
      }
    } else if (elements->IsFixedArray()) {
      for (int i = 0; i < elements_length; i++) {
        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value = JSObject::GetElement(object, i);
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ lea(ecx, Operand(result, *offset));
          __ mov(FieldOperand(result, total_offset), ecx);
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
          __ mov(FieldOperand(result, total_offset), ecx);
        } else {
          __ mov(FieldOperand(result, total_offset), Immediate(value));
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}


void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  int size = instr->hydrogen()->total_size();

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
  ASSERT_EQ(size, offset);
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ PushHeapObject(literals);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(constant_properties));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ push(Immediate(Smi::FromInt(flags)));

  // Pick the right runtime function or stub to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  // esi = context.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create the regexp literal using a runtime function.
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(1);
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition =
      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
  if (final_branch_condition != no_condition) {
    EmitBranch(true_block, false_block, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
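  // Emits the checks for one typeof comparison and returns the condition under
  // which the true branch should be taken; no_condition means the type literal
  // matches nothing and control has already been transferred to false_label.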
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ cmp(input, factory()->null_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ cmp(input, factory()->null_value());
      __ j(equal, true_label);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    __ jmp(false_label);
  }
  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt() {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  int patch_size = Deoptimizer::patch_size();
  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    __ Nop(padding_size);
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  EmitPushTaggedOperand(key);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  // Create a safepoint generator that also ensures enough space in the
  // reloc info for patching in deoptimization (since this is invoking a
  // builtin).
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new DeferredStackCheck(this, instr);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoIn(LIn* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  EmitPushTaggedOperand(key);
  EmitPushTaggedOperand(obj);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
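  // The object to enumerate is in eax. Deoptimize for undefined, null, smis
  // and proxies; any other object has a map that can be used for enumeration.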
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr->environment());

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr->environment());

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ test(result, result);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object);
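  // The index is a smi, i.e. the field index shifted left by one, so scaling
  // it by half a pointer size addresses pointer-sized fields.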
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32