// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {
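    // Pad the instruction stream so that, together with the call about to
    // be emitted (call_size bytes), at least Deoptimizer::patch_size()
    // bytes separate consecutive lazy-deopt patch sites.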
    codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
  }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  int length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
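  // Windows commits stack pages on demand, one guard page at a time, so a
  // single large rsp adjustment can jump past the guard page and fault on
  // the first access. Touching every page of the newly reserved area keeps
  // them all mapped (the same job MSVC's __chkstk stack probes do for
  // native frames).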
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movq(Operand(rsp, offset), rax);
  }
}
#endif


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Strict mode functions need to replace the receiver with undefined
    // when called as functions (without an explicit receiver
    // object). rcx is zero for method calls and non-zero for function
    // calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ testq(rcx, rcx);
      __ j(zero, &ok, Label::kNear);
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
      __ movq(Operand(rsp, receiver_offset), kScratchRegister);
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ push(rbp);  // Caller's frame pointer.
    __ movq(rbp, rsp);
    __ push(rsi);  // Callee's context.
    if (info()->IsStub()) {
      __ Push(Smi::FromInt(StackFrame::STUB));
    } else {
      __ push(rdi);  // Callee's JS function.
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
      Label loop;
      __ bind(&loop);
      __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ pop(rax);
    } else {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      Comment(";;; Save clobbered callee double registers");
      int count = 0;
      BitVector* doubles = chunk()->allocated_double_registers();
      BitVector::Iterator save_iterator(doubles);
      while (!save_iterator.Done()) {
        __ movsd(MemOperand(rsp, count * kDoubleSize),
                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
        save_iterator.Advance();
        count++;
      }
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    __ push(rdi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both rax and rsi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in rsi.
    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movq(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movq(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);

    // Don't emit code for basic blocks with a replacement.
    if (instr->IsLabel()) {
      emit_instructions = !LLabel::cast(instr)->HasReplacement();
    }
    if (!emit_instructions) continue;

    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
      Comment(";;; <@%d,#%d> %s",
              current_instruction_,
              instr->hydrogen_value()->id(),
              instr->Mnemonic());
    }

    RecordAndUpdatePosition(instr->position());

    instr->CompileToNative(this);
  }
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(rbp);
        __ movq(rbp, rsp);
        __ push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ push(rsi);
        __ movq(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      int pos = instructions_->at(code->instruction_index())->position();
      RecordAndUpdatePosition(pos);

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ push(rbp);  // Caller's frame pointer.
        __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ lea(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movq(rsp, rbp);
        __ pop(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsTagged();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle();
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return Operand(rbp, StackSlotOffset(op->index()));
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times == 0);  // Not yet implemented on x64.

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (mode == RelocInfo::EMBEDDED_OBJECT &&
        it.rinfo()->target_object()->IsMap()) {
      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
      if (map->CanTransition()) {
        maps.Add(map, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded maps after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register rsi always contains a pointer to the context.
    safepoint.DefinePointerRegister(rsi, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::RecordAndUpdatePosition(int position) {
  if (position >= 0 && position != old_position_) {
    masm()->positions_recorder()->RecordPosition(position);
    old_position_ = position;
  }
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Record the address of the first unknown OSR value as the place to enter.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());
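    // In effect, for a power-of-2 |divisor| this computes
    //   x % divisor ==  x & (divisor - 1)        for x >= 0
    //   x % divisor == -((-x) & (divisor - 1))   for x < 0
    // e.g. -7 % 4 == -(7 & 3) == -3, matching the JS semantics where the
    // result takes the sign of the dividend.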

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ testl(left_reg, left_reg);
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ negl(left_reg);
      __ andl(left_reg, Immediate(divisor - 1));
      __ negl(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ andl(left_reg, Immediate(divisor - 1));
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));
    Register right_reg = ToRegister(instr->right());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    __ cmpl(right_reg, Immediate(divisor));
    DeoptimizeIf(not_equal, instr->environment());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ testl(left_reg, left_reg);
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ negl(left_reg);
      __ andl(left_reg, Immediate(divisor - 1));
      __ negl(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ andl(left_reg, Immediate(divisor - 1));
    __ bind(&done);

  } else {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(rax));
    Register right_reg = ToRegister(instr->right());
    ASSERT(!right_reg.is(rax));
    ASSERT(!right_reg.is(rdx));
    Register result_reg = ToRegister(instr->result());
    ASSERT(result_reg.is(rdx));

    Label done;
    // Check for x % 0, idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ testl(right_reg, right_reg);
      DeoptimizeIf(zero, instr->environment());
    }

    // Check for kMinInt % -1, idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmpl(left_reg, Immediate(kMinInt));
      __ j(not_zero, &no_overflow_possible, Label::kNear);
      __ cmpl(right_reg, Immediate(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(equal, instr->environment());
      } else {
        __ j(not_equal, &no_overflow_possible, Label::kNear);
        __ Set(result_reg, 0);
        __ jmp(&done, Label::kNear);
      }
      __ bind(&no_overflow_possible);
    }

    // Sign extend dividend in eax into edx:eax, since we are using only the low
    // 32 bits of the values.
    __ cdq();

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      __ testl(left_reg, left_reg);
      __ j(not_sign, &positive_left, Label::kNear);
      __ idivl(right_reg);
      __ testl(result_reg, result_reg);
      DeoptimizeIf(zero, instr->environment());
      __ jmp(&done, Label::kNear);
      __ bind(&positive_left);
    }
    __ idivl(right_reg);
    __ bind(&done);
  }
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  const Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  const Register result = ToRegister(instr->result());

  switch (divisor) {
  case 0:
    DeoptimizeIf(no_condition, instr->environment());
    return;

  case 1:
    if (!result.is(dividend)) {
      __ movl(result, dividend);
    }
    return;

  case -1:
    if (!result.is(dividend)) {
      __ movl(result, dividend);
    }
    __ negl(result);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
    return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      __ movsxlq(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ sar(result, Immediate(power));
    } else {
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      __ sarl(result, Immediate(power));
    }
  } else {
    Register reg1 = ToRegister(instr->temp());
    Register reg2 = ToRegister(instr->result());

    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // One extra bit of precision (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
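    // For example, divisor_abs == 3 gives b == 1 and shift == 33, so
    // multiplier == round(2^33 / 3) == 2863311531; the division below is
    // then performed as a multiply by roughly 2^33 / 3 followed by an
    // arithmetic shift right by 33.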
    // The multiply is int64, so sign-extend to r64.
    __ movsxlq(reg1, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ neg(reg1);
      DeoptimizeIf(zero, instr->environment());
    }
    __ movq(reg2, multiplier, RelocInfo::NONE64);
    // The result fits in r64, because it's int32 * uint32.
    __ imul(reg2, reg1);

    __ addq(reg2, Immediate(1 << 30));
    __ sar(reg2, Immediate(shift));
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ testl(dividend, dividend);
        DeoptimizeIf(zero, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmpl(dividend, Immediate(kMinInt));
        DeoptimizeIf(zero, instr->environment());
      }
      test_value = - divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        Label done, negative;
        __ cmpl(dividend, Immediate(0));
        __ j(less, &negative, Label::kNear);
        __ sarl(dividend, Immediate(power));
        if (divisor < 0) __ negl(dividend);
        __ jmp(&done, Label::kNear);

        __ bind(&negative);
        __ negl(dividend);
        __ sarl(dividend, Immediate(power));
        if (divisor > 0) __ negl(dividend);
        __ bind(&done);
        return;  // Don't fall through to "__ neg" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ testl(dividend, Immediate(test_value));
        DeoptimizeIf(not_zero, instr->environment());
        __ sarl(dividend, Immediate(power));
      }
    }

    if (divisor < 0) __ negl(dividend);

    return;
  }

  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(ToRegister(instr->left()).is(rax));
  ASSERT(!ToRegister(instr->right()).is(rax));
  ASSERT(!ToRegister(instr->right()).is(rdx));

  Register left_reg = rax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ testl(left_reg, left_reg);
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ testl(right_reg, right_reg);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to rdx.
  __ cdq();
  __ idivl(right_reg);

  if (instr->is_flooring()) {
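    // idivl truncates toward zero, but flooring division rounds toward
    // negative infinity. When the remainder is nonzero and its sign (which
    // equals the dividend's) differs from the divisor's, the truncated
    // quotient is one too large: xor + sarl(31) produces -1 exactly in
    // that case, and the addl applies the correction.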
    Label done;
    __ testl(rdx, rdx);
    __ j(zero, &done, Label::kNear);
    __ xorl(rdx, right_reg);
    __ sarl(rdx, Immediate(31));
    __ addl(rax, rdx);
    __ bind(&done);
  } else if (!instr->hydrogen()->CheckFlag(
      HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(rdx, rdx);
    DeoptimizeIf(not_zero, instr->environment());
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ movl(kScratchRegister, left);
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
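      // For example, lea computes left * 3 as left + left * 2 in a single
      // instruction and shll handles the powers of two; lea sets no flags
      // at all, which is why this path is limited to !can_overflow.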
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger32(left, left);
      __ imul(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger32(left, left);
      __ imul(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ testl(left, left);
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      __ orl(kScratchRegister, ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      __ orl(kScratchRegister, ToRegister(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
   1485     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
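    // For 32-bit operands the hardware only uses the low five bits of the
    // shift count, so the constant is masked the same way here.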
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ shl(ToRegister(left), Immediate(shift_count));
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subq(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subq(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
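  // Note that -0.0 has only the sign bit set (bit pattern
  // 0x8000000000000000), so int_val == 0 holds only for +0.0.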
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte.
  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(result, Immediate(Map::kElementsKindMask));
  __ shr(result, Immediate(Map::kElementsKindShift));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi, return the object.
    __ JumpIfSmi(input, &done, Label::kNear);
  }

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);
  __ movq(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

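  // Field 0 is the date value itself and is read directly. Other fields
  // below kFirstUncachedField are cached on the JSDate object, but are
  // only valid while the object's cache stamp matches the isolate's date
  // cache stamp; on a mismatch we fall back to the runtime function that
  // recomputes the field.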
  if (index->value() == 0) {
    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movq(kScratchRegister, stamp_operand);
      __ cmpq(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movq(arg_reg_1, object);
    __ movq(arg_reg_2, index, RelocInfo::NONE64);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ bind(&done);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  Register index = ToRegister(instr->index());
  Register value = ToRegister(instr->value());
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    __ push(value);
    __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));

    __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(value);
  }

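  // Sequential one-byte strings store one byte per character and two-byte
  // strings two, hence the times_1 vs. times_2 index scaling below.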
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
            value);
  } else {
    __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
            value);
  }
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToRegister(instr->value()));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

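  // When the result register differs from the left operand, lea can compute
  // left + right in one instruction without clobbering left. lea leaves the
  // flags untouched, so UseLea is presumably only true when no overflow
  // check is required; otherwise we take the add path below, which does set
  // the overflow flag.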
  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToInteger32(LConstantOperand::cast(right));
      __ leal(ToRegister(instr->result()),
              MemOperand(ToRegister(left), offset));
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ lea(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      __ addl(ToRegister(left),
              Immediate(ToInteger32(LConstantOperand::cast(right))));
    } else if (right->IsRegister()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ addq(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ addq(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movq(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpq(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movq(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpq(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movq(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = xmm0;
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
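    // ucomisd treats +0 and -0 as equal, so we distinguish them via the
    // sign bit: for min, or-ing the sign bits yields -0 whenever either
    // input is -0; for max, +0 + (-0) == +0, so addsd yields the zero with
    // the correct sign.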
    if (operation == HMathMinMax::kMathMin) {
      __ orpd(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);
    __ bind(&return_right);
    __ movsd(left_reg, right_reg);

    __ bind(&return_left);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs
      // when there is a mulsd depending on the result.
      __ movaps(left, left);
      break;
    case Token::MOD:
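      // There is no SSE2 instruction for floating-point modulus, so we call
      // out to C through double_fp_operation; the operands travel in xmm0
      // and xmm1, as set up below.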
      __ PrepareCallCFunction(2);
      __ movaps(xmm0, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
      __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
      __ movaps(result, xmm0);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(rdx));
  ASSERT(ToRegister(instr->right()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


int LCodeGen::GetNextEmittedBlock() const {
  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
  }
  return -1;
}

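// Emits at most one jump for a two-way branch: when one of the targets is
// the next block to be emitted, we rely on fall-through instead of an
// explicit jump to it.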
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    if (cc != always) {
      __ jmp(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(false_block));
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32() || r.IsDouble()) {
    EmitBranch(instr, no_condition);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsTaggedNumber()) {
      EmitBranch(instr, no_condition);
    }
    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
    __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, equal);
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testl(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testq(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    XMMRegister reg = ToDoubleRegister(instr->value());
    __ xorps(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(instr, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      __ xorps(xmm0, xmm0);
      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

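      // Each Contains() check below is emitted only for value types the
      // type feedback has actually observed; a value of any type never
      // seen before falls through to the deopt at the end of this function.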
      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Cmp(reg, Smi::FromInt(0));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ testb(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      const Register map = kScratchRegister;
      if (expected.NeedsMap()) {
        __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ testb(FieldOperand(map, Map::kBitFieldOffset),
                   Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ xorps(xmm0, xmm0);
        __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


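// Maps a comparison token to an x64 condition code. Unsigned conditions
// are requested for double comparisons because ucomisd sets the flags the
// way an unsigned integer comparison does.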
inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ Cmp(ToRegister(left), Smi::FromInt(value));
        } else {
          __ cmpl(ToRegister(left), Immediate(value));
        }
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (right->IsRegister()) {
            __ Cmp(ToRegister(right), Smi::FromInt(value));
          } else {
            __ Cmp(ToOperand(right), Smi::FromInt(value));
          }
        } else if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (right->IsRegister()) {
          __ cmpq(ToRegister(left), ToRegister(right));
        } else {
          __ cmpq(ToRegister(left), ToOperand(right));
        }
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(instr, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Register right = ToRegister(instr->right());
    __ cmpq(left, right);
  }
  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ Cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

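  // In unboxed-double representation the hole is a NaN with a distinctive
  // upper word (kHoleNanUpper32). The value can only be the hole if it is
  // NaN (checked above), so spill it and compare the upper 32 bits: after
  // the stack pointer is restored the double sits at [rsp - kDoubleSize],
  // putting its upper word at [rsp - sizeof(kHoleNanUpper32)].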
  __ subq(rsp, Immediate(kDoubleSize));
  __ movsd(MemOperand(rsp, 0), input_reg);
  __ addq(rsp, Immediate(kDoubleSize));

  int offset = sizeof(kHoleNanUpper32);
  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  Condition true_cond = EmitIsObject(
      reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Condition is_smi;
  if (instr->value()->IsRegister()) {
    Register input = ToRegister(instr->value());
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->value());
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(instr, is_smi);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  __ testq(rax, rax);

  EmitBranch(instr, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movq(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  ASSERT(class_name->IsInternalizedString());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };


  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated
  // by the instanceof stub.
  Label cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->temp());
  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
  __ cmpq(map, Operand(kScratchRegister, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ push(ToRegister(instr->value()));
    __ PushHeapObject(instr->function());

    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    ASSERT(delta >= 0);
    __ push_imm32(delta);
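    // The stub presumably uses this delta to locate the map-check patch
    // site relative to its return address, so that it can update the
    // cached map/result pair emitted above.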

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because the stub is going to
    // remove the third argument from the stack before jumping
    // to the instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(isolate()),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
    // Move result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movq(kScratchRegister, rax);
  }
  __ testq(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
  __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset));
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(rax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    ASSERT(NeedsEagerFrame());
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
               MemOperand(rsp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ movq(rsp, rbp);
    __ pop(rbp);
    no_frame_start = masm_->pc_offset();
  }
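  // From here until the return the frame has been torn down; the pc range
  // is recorded via AddNoFrameRange below, presumably so stack traversal
  // does not expect a frame in this window.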
  if (instr->has_constant_parameter_count()) {
    __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
           rcx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiToInteger32(reg, reg);
    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
    __ PopReturnAddressTo(return_addr_reg);
    __ shl(reg, Immediate(kPointerSizeLog2));
    __ addq(rsp, reg);
    __ jmp(return_addr_reg);
  }
  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<Cell> cell_handle = instr->hydrogen()->cell();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We have a temp because CompareRoot might clobber kScratchRegister.
    Register cell = ToRegister(instr->temp());
    ASSERT(!value.is(cell));
    __ movq(cell, cell_handle, RelocInfo::CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    // Store the value.
    __ movq(Operand(cell, 0), value);
  } else {
    // Store the value.
    __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
    __ movq(Operand(kScratchRegister, 0), value);
  }
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result, ContextOperand(context, instr->slot_index()));
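  // A hole in a context slot marks a binding that has not been initialized
  // yet (e.g. a harmony let/const binding). Depending on the hydrogen
  // instruction we either deoptimize, so the full code can throw the
  // reference error, or silently read undefined.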
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movq(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->temp());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

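  // Three cases, in order: a load from external (off-heap) memory, an
  // unboxed double field, and an ordinary tagged field, which lives either
  // in the object itself or in its out-of-object properties array.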
  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    if (instr->object()->IsConstantOperand()) {
      ASSERT(result.is(rax));
      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
    } else {
      Register object = ToRegister(instr->object());
      __ movq(result, MemOperand(object, offset));
    }
    return;
  }

  Register object = ToRegister(instr->object());
  if (FLAG_track_double_fields &&
      instr->hydrogen()->representation().IsDouble()) {
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (access.IsInobject()) {
    __ movq(result, FieldOperand(object, offset));
  } else {
    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(result, FieldOperand(result, offset));
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ movq(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  __ movq(result, FieldOperand(input,
                               ExternalPixelArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2865     int index = (const_length - const_index) + 1;
   2866     __ movq(result, Operand(arguments, index * kPointerSize));
   2867   } else {
   2868     Register length = ToRegister(instr->length());
   2869     // There are two words between the frame pointer and the last argument.
    2870     // Subtracting index from length accounts for one; kPointerSize below covers the other.
   2871     if (instr->index()->IsRegister()) {
   2872       __ subl(length, ToRegister(instr->index()));
   2873     } else {
   2874       __ subl(length, ToOperand(instr->index()));
   2875     }
   2876     __ movq(result,
   2877             Operand(arguments, length, times_pointer_size, kPointerSize));
   2878   }
   2879 }
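         // Illustrative sketch (not part of the original source): with the
         // frame layout assumed above, argument i of n lives at
         //   arguments_ptr + (n - i + 1) * kPointerSize
         // e.g. n == 3, i == 0 reads [arguments + 4 * kPointerSize]; the two
         // extra words skip the saved frame pointer and the return address.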
   2880 
   2881 
   2882 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2883   ElementsKind elements_kind = instr->elements_kind();
   2884   LOperand* key = instr->key();
   2885   if (!key->IsConstantOperand()) {
   2886     Register key_reg = ToRegister(key);
   2887     // Even though the HLoad/StoreKeyed (in this case) instructions force
   2888     // the input representation for the key to be an integer, the input
    2889     // gets replaced during bounds check elimination with the index argument
   2890     // to the bounds check, which can be tagged, so that case must be
   2891     // handled here, too.
   2892     if (instr->hydrogen()->IsDehoisted()) {
    2893       // Sign-extend the key because it could be a 32-bit negative value
    2894       // and the dehoisted address computation happens in 64 bits.
   2895       __ movsxlq(key_reg, key_reg);
   2896     }
   2897   }
   2898   Operand operand(BuildFastArrayOperand(
   2899       instr->elements(),
   2900       key,
   2901       elements_kind,
   2902       0,
   2903       instr->additional_index()));
   2904 
   2905   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   2906     XMMRegister result(ToDoubleRegister(instr->result()));
   2907     __ movss(result, operand);
   2908     __ cvtss2sd(result, result);
   2909   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   2910     __ movsd(ToDoubleRegister(instr->result()), operand);
   2911   } else {
   2912     Register result(ToRegister(instr->result()));
   2913     switch (elements_kind) {
   2914       case EXTERNAL_BYTE_ELEMENTS:
   2915         __ movsxbq(result, operand);
   2916         break;
   2917       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   2918       case EXTERNAL_PIXEL_ELEMENTS:
   2919         __ movzxbq(result, operand);
   2920         break;
   2921       case EXTERNAL_SHORT_ELEMENTS:
   2922         __ movsxwq(result, operand);
   2923         break;
   2924       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   2925         __ movzxwq(result, operand);
   2926         break;
   2927       case EXTERNAL_INT_ELEMENTS:
   2928         __ movsxlq(result, operand);
   2929         break;
   2930       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   2931         __ movl(result, operand);
   2932         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   2933           __ testl(result, result);
   2934           DeoptimizeIf(negative, instr->environment());
   2935         }
   2936         break;
   2937       case EXTERNAL_FLOAT_ELEMENTS:
   2938       case EXTERNAL_DOUBLE_ELEMENTS:
   2939       case FAST_ELEMENTS:
   2940       case FAST_SMI_ELEMENTS:
   2941       case FAST_DOUBLE_ELEMENTS:
   2942       case FAST_HOLEY_ELEMENTS:
   2943       case FAST_HOLEY_SMI_ELEMENTS:
   2944       case FAST_HOLEY_DOUBLE_ELEMENTS:
   2945       case DICTIONARY_ELEMENTS:
   2946       case NON_STRICT_ARGUMENTS_ELEMENTS:
   2947         UNREACHABLE();
   2948         break;
   2949     }
   2950   }
   2951 }
   2952 
   2953 
   2954 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   2955   XMMRegister result(ToDoubleRegister(instr->result()));
   2956   LOperand* key = instr->key();
   2957   if (!key->IsConstantOperand()) {
   2958     Register key_reg = ToRegister(key);
   2959     // Even though the HLoad/StoreKeyed instructions force the input
   2960     // representation for the key to be an integer, the input gets replaced
    2961     // during bounds check elimination with the index argument to the bounds
   2962     // check, which can be tagged, so that case must be handled here, too.
   2963     if (instr->hydrogen()->IsDehoisted()) {
    2964       // Sign-extend the key because it could be a 32-bit negative value
    2965       // and the dehoisted address computation happens in 64 bits.
   2966       __ movsxlq(key_reg, key_reg);
   2967     }
   2968   }
   2969 
   2970   if (instr->hydrogen()->RequiresHoleCheck()) {
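             // A hole is stored as a NaN with a distinctive upper word
             // (kHoleNanUpper32), so only the upper 32 bits of the element
             // need to be inspected; sizeof(kHoleNanLower32) skips the lower
             // half.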
   2971     int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
   2972         sizeof(kHoleNanLower32);
   2973     Operand hole_check_operand = BuildFastArrayOperand(
   2974         instr->elements(),
   2975         key,
   2976         FAST_DOUBLE_ELEMENTS,
   2977         offset,
   2978         instr->additional_index());
   2979     __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
   2980     DeoptimizeIf(equal, instr->environment());
   2981   }
   2982 
   2983   Operand double_load_operand = BuildFastArrayOperand(
   2984       instr->elements(),
   2985       key,
   2986       FAST_DOUBLE_ELEMENTS,
   2987       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
   2988       instr->additional_index());
   2989   __ movsd(result, double_load_operand);
   2990 }
   2991 
   2992 
   2993 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   2994   Register result = ToRegister(instr->result());
   2995   LOperand* key = instr->key();
   2996   if (!key->IsConstantOperand()) {
   2997     Register key_reg = ToRegister(key);
   2998     // Even though the HLoad/StoreKeyedFastElement instructions force
   2999     // the input representation for the key to be an integer, the input
    3000     // gets replaced during bounds check elimination with the index
   3001     // argument to the bounds check, which can be tagged, so that
   3002     // case must be handled here, too.
   3003     if (instr->hydrogen()->IsDehoisted()) {
    3004       // Sign-extend the key because it could be a 32-bit negative value
    3005       // and the dehoisted address computation happens in 64 bits.
   3006       __ movsxlq(key_reg, key_reg);
   3007     }
   3008   }
   3009 
   3010   // Load the result.
   3011   __ movq(result,
   3012           BuildFastArrayOperand(instr->elements(),
   3013                                 key,
   3014                                 FAST_ELEMENTS,
   3015                                 FixedArray::kHeaderSize - kHeapObjectTag,
   3016                                 instr->additional_index()));
   3017 
   3018   // Check for the hole value.
   3019   if (instr->hydrogen()->RequiresHoleCheck()) {
   3020     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3021       Condition smi = __ CheckSmi(result);
   3022       DeoptimizeIf(NegateCondition(smi), instr->environment());
   3023     } else {
   3024       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   3025       DeoptimizeIf(equal, instr->environment());
   3026     }
   3027   }
   3028 }
   3029 
   3030 
   3031 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3032   if (instr->is_external()) {
   3033     DoLoadKeyedExternalArray(instr);
   3034   } else if (instr->hydrogen()->representation().IsDouble()) {
   3035     DoLoadKeyedFixedDoubleArray(instr);
   3036   } else {
   3037     DoLoadKeyedFixedArray(instr);
   3038   }
   3039 }
   3040 
   3041 
   3042 Operand LCodeGen::BuildFastArrayOperand(
   3043     LOperand* elements_pointer,
   3044     LOperand* key,
   3045     ElementsKind elements_kind,
   3046     uint32_t offset,
   3047     uint32_t additional_index) {
   3048   Register elements_pointer_reg = ToRegister(elements_pointer);
   3049   int shift_size = ElementsKindToShiftSize(elements_kind);
   3050   if (key->IsConstantOperand()) {
   3051     int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
   3052     if (constant_value & 0xF0000000) {
   3053       Abort(kArrayIndexConstantValueTooBig);
   3054     }
   3055     return Operand(elements_pointer_reg,
   3056                    ((constant_value + additional_index) << shift_size)
   3057                        + offset);
   3058   } else {
   3059     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
   3060     return Operand(elements_pointer_reg,
   3061                    ToRegister(key),
   3062                    scale_factor,
   3063                    offset + (additional_index << shift_size));
   3064   }
   3065 }
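         // Illustrative sketch (not part of the original source): for a
         // FAST_ELEMENTS access (shift_size == 3 on x64), constant key 2 and
         // additional_index 1 yield [elements + ((2 + 1) << 3) + offset],
         // while a register key yields
         // [elements + key * 8 + offset + (1 << 3)].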
   3066 
   3067 
   3068 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3069   ASSERT(ToRegister(instr->object()).is(rdx));
   3070   ASSERT(ToRegister(instr->key()).is(rax));
   3071 
   3072   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
   3073   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3074 }
   3075 
   3076 
   3077 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3078   Register result = ToRegister(instr->result());
   3079 
   3080   if (instr->hydrogen()->from_inlined()) {
   3081     __ lea(result, Operand(rsp, -2 * kPointerSize));
   3082   } else {
    3083     // Check for an arguments adaptor frame.
   3084     Label done, adapted;
   3085     __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   3086     __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
   3087            Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   3088     __ j(equal, &adapted, Label::kNear);
   3089 
   3090     // No arguments adaptor frame.
   3091     __ movq(result, rbp);
   3092     __ jmp(&done, Label::kNear);
   3093 
   3094     // Arguments adaptor frame present.
   3095     __ bind(&adapted);
   3096     __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   3097 
    3098     // Result is the frame pointer of this frame if no adaptor frame is
    3099     // present, and of the real frame below the adaptor frame otherwise.
   3100     __ bind(&done);
   3101   }
   3102 }
   3103 
   3104 
   3105 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3106   Register result = ToRegister(instr->result());
   3107 
   3108   Label done;
   3109 
    3110   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3111   if (instr->elements()->IsRegister()) {
   3112     __ cmpq(rbp, ToRegister(instr->elements()));
   3113   } else {
   3114     __ cmpq(rbp, ToOperand(instr->elements()));
   3115   }
   3116   __ movl(result, Immediate(scope()->num_parameters()));
   3117   __ j(equal, &done, Label::kNear);
   3118 
   3119   // Arguments adaptor frame present. Get argument length from there.
   3120   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   3121   __ SmiToInteger32(result,
   3122                     Operand(result,
   3123                             ArgumentsAdaptorFrameConstants::kLengthOffset));
   3124 
   3125   // Argument length is in result register.
   3126   __ bind(&done);
   3127 }
   3128 
   3129 
   3130 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3131   Register receiver = ToRegister(instr->receiver());
   3132   Register function = ToRegister(instr->function());
   3133 
   3134   // If the receiver is null or undefined, we have to pass the global
   3135   // object as a receiver to normal functions. Values have to be
   3136   // passed unchanged to builtins and strict-mode functions.
   3137   Label global_object, receiver_ok;
   3138 
   3139   // Do not transform the receiver to object for strict mode
   3140   // functions.
   3141   __ movq(kScratchRegister,
   3142           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3143   __ testb(FieldOperand(kScratchRegister,
   3144                         SharedFunctionInfo::kStrictModeByteOffset),
   3145            Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
   3146   __ j(not_equal, &receiver_ok, Label::kNear);
   3147 
   3148   // Do not transform the receiver to object for builtins.
   3149   __ testb(FieldOperand(kScratchRegister,
   3150                         SharedFunctionInfo::kNativeByteOffset),
   3151            Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
   3152   __ j(not_equal, &receiver_ok, Label::kNear);
   3153 
   3154   // Normal function. Replace undefined or null with global receiver.
   3155   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
   3156   __ j(equal, &global_object, Label::kNear);
   3157   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
   3158   __ j(equal, &global_object, Label::kNear);
   3159 
   3160   // The receiver should be a JS object.
   3161   Condition is_smi = __ CheckSmi(receiver);
   3162   DeoptimizeIf(is_smi, instr->environment());
   3163   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
   3164   DeoptimizeIf(below, instr->environment());
   3165   __ jmp(&receiver_ok, Label::kNear);
   3166 
   3167   __ bind(&global_object);
   3168   // TODO(kmillikin): We have a hydrogen value for the global object.  See
   3169   // if it's better to use it than to explicitly fetch it from the context
   3170   // here.
   3171   __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
   3172   __ movq(receiver,
   3173           FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   3174   __ bind(&receiver_ok);
   3175 }
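         // Illustrative sketch (not part of the original source), in
         // JavaScript terms, for function f() { return this; }:
         //   f.call(null);  // sloppy f: global receiver; strict/native: null
         //   f.call(42);    // smi receiver: this optimized path deoptimizes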
   3176 
   3177 
   3178 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3179   Register receiver = ToRegister(instr->receiver());
   3180   Register function = ToRegister(instr->function());
   3181   Register length = ToRegister(instr->length());
   3182   Register elements = ToRegister(instr->elements());
   3183   ASSERT(receiver.is(rax));  // Used for parameter count.
   3184   ASSERT(function.is(rdi));  // Required by InvokeFunction.
   3185   ASSERT(ToRegister(instr->result()).is(rax));
   3186 
   3187   // Copy the arguments to this function possibly from the
   3188   // adaptor frame below it.
   3189   const uint32_t kArgumentsLimit = 1 * KB;
   3190   __ cmpq(length, Immediate(kArgumentsLimit));
   3191   DeoptimizeIf(above, instr->environment());
   3192 
   3193   __ push(receiver);
   3194   __ movq(receiver, length);
   3195 
   3196   // Loop through the arguments pushing them onto the execution
   3197   // stack.
   3198   Label invoke, loop;
   3199   // length is a small non-negative integer, due to the test above.
   3200   __ testl(length, length);
   3201   __ j(zero, &invoke, Label::kNear);
   3202   __ bind(&loop);
   3203   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
   3204   __ decl(length);
   3205   __ j(not_zero, &loop);
   3206 
   3207   // Invoke the function.
   3208   __ bind(&invoke);
   3209   ASSERT(instr->HasPointerMap());
   3210   LPointerMap* pointers = instr->pointer_map();
   3211   RecordPosition(pointers->position());
   3212   SafepointGenerator safepoint_generator(
   3213       this, pointers, Safepoint::kLazyDeopt);
   3214   ParameterCount actual(rax);
   3215   __ InvokeFunction(function, actual, CALL_FUNCTION,
   3216                     safepoint_generator, CALL_AS_METHOD);
   3217   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3218 }
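         // Illustrative sketch (not part of the original source): the push
         // loop above behaves like
         //   for (int i = 0; i < length; i++) push(argument[i]);
         // leaving the last argument on top of the stack, as InvokeFunction
         // expects.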
   3219 
   3220 
   3221 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3222   LOperand* argument = instr->value();
   3223   EmitPushTaggedOperand(argument);
   3224 }
   3225 
   3226 
   3227 void LCodeGen::DoDrop(LDrop* instr) {
   3228   __ Drop(instr->count());
   3229 }
   3230 
   3231 
   3232 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3233   Register result = ToRegister(instr->result());
   3234   __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   3235 }
   3236 
   3237 
   3238 void LCodeGen::DoContext(LContext* instr) {
   3239   Register result = ToRegister(instr->result());
   3240   __ movq(result, rsi);
   3241 }
   3242 
   3243 
   3244 void LCodeGen::DoOuterContext(LOuterContext* instr) {
   3245   Register context = ToRegister(instr->context());
   3246   Register result = ToRegister(instr->result());
   3247   __ movq(result,
   3248           Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   3249 }
   3250 
   3251 
   3252 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3253   __ push(rsi);  // The context is the first argument.
   3254   __ PushHeapObject(instr->hydrogen()->pairs());
   3255   __ Push(Smi::FromInt(instr->hydrogen()->flags()));
   3256   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
   3257 }
   3258 
   3259 
   3260 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   3261   Register result = ToRegister(instr->result());
   3262   __ movq(result, GlobalObjectOperand());
   3263 }
   3264 
   3265 
   3266 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
   3267   Register global = ToRegister(instr->global());
   3268   Register result = ToRegister(instr->result());
   3269   __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
   3270 }
   3271 
   3272 
   3273 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3274                                  int formal_parameter_count,
   3275                                  int arity,
   3276                                  LInstruction* instr,
   3277                                  CallKind call_kind,
   3278                                  RDIState rdi_state) {
   3279   bool dont_adapt_arguments =
   3280       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3281   bool can_invoke_directly =
   3282       dont_adapt_arguments || formal_parameter_count == arity;
   3283 
   3284   LPointerMap* pointers = instr->pointer_map();
   3285   RecordPosition(pointers->position());
   3286 
   3287   if (can_invoke_directly) {
   3288     if (rdi_state == RDI_UNINITIALIZED) {
   3289       __ LoadHeapObject(rdi, function);
   3290     }
   3291 
   3292     // Change context.
   3293     __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   3294 
    3295     // Set rax to the arguments count if adaptation is not needed. Assumes
    3296     // that rax is available to write to at this point.
   3297     if (dont_adapt_arguments) {
   3298       __ Set(rax, arity);
   3299     }
   3300 
   3301     // Invoke function.
   3302     __ SetCallKind(rcx, call_kind);
   3303     if (function.is_identical_to(info()->closure())) {
   3304       __ CallSelf();
   3305     } else {
   3306       __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   3307     }
   3308 
   3309     // Set up deoptimization.
   3310     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
   3311   } else {
   3312     // We need to adapt arguments.
   3313     SafepointGenerator generator(
   3314         this, pointers, Safepoint::kLazyDeopt);
   3315     ParameterCount count(arity);
   3316     ParameterCount expected(formal_parameter_count);
   3317     __ InvokeFunction(
   3318         function, expected, count, CALL_FUNCTION, generator, call_kind);
   3319   }
   3320 
   3321   // Restore context.
   3322   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3323 }
   3324 
   3325 
   3326 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   3327   ASSERT(ToRegister(instr->result()).is(rax));
   3328   CallKnownFunction(instr->hydrogen()->function(),
   3329                     instr->hydrogen()->formal_parameter_count(),
   3330                     instr->arity(),
   3331                     instr,
   3332                     CALL_AS_METHOD,
   3333                     RDI_UNINITIALIZED);
   3334 }
   3335 
   3336 
   3337 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3338   Register input_reg = ToRegister(instr->value());
   3339   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
   3340                  Heap::kHeapNumberMapRootIndex);
   3341   DeoptimizeIf(not_equal, instr->environment());
   3342 
   3343   Label slow, allocated, done;
   3344   Register tmp = input_reg.is(rax) ? rcx : rax;
   3345   Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
   3346 
   3347   // Preserve the value of all registers.
   3348   PushSafepointRegistersScope scope(this);
   3349 
   3350   __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   3351   // Check the sign of the argument. If the argument is positive, just
   3352   // return it. We do not need to patch the stack since |input| and
   3353   // |result| are the same register and |input| will be restored
   3354   // unchanged by popping safepoint registers.
   3355   __ testl(tmp, Immediate(HeapNumber::kSignMask));
   3356   __ j(zero, &done);
   3357 
   3358   __ AllocateHeapNumber(tmp, tmp2, &slow);
   3359   __ jmp(&allocated, Label::kNear);
   3360 
   3361   // Slow case: Call the runtime system to do the number allocation.
   3362   __ bind(&slow);
   3363   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   3364   // Set the pointer to the new heap number in tmp.
   3365   if (!tmp.is(rax)) __ movq(tmp, rax);
   3366   // Restore input_reg after call to runtime.
   3367   __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
   3368 
   3369   __ bind(&allocated);
   3370   __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
   3371   __ shl(tmp2, Immediate(1));
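           // A left shift followed by a logical right shift clears the sign
           // bit of the IEEE-754 bit pattern, computing |x| without touching
           // the XMM unit.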
   3372   __ shr(tmp2, Immediate(1));
   3373   __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
   3374   __ StoreToSafepointRegisterSlot(input_reg, tmp);
   3375 
   3376   __ bind(&done);
   3377 }
   3378 
   3379 
   3380 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3381   Register input_reg = ToRegister(instr->value());
   3382   __ testl(input_reg, input_reg);
   3383   Label is_positive;
   3384   __ j(not_sign, &is_positive, Label::kNear);
   3385   __ negl(input_reg);  // Sets flags.
   3386   DeoptimizeIf(negative, instr->environment());
   3387   __ bind(&is_positive);
   3388 }
   3389 
   3390 
   3391 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
   3392   Register input_reg = ToRegister(instr->value());
   3393   __ testq(input_reg, input_reg);
   3394   Label is_positive;
   3395   __ j(not_sign, &is_positive, Label::kNear);
   3396   __ neg(input_reg);  // Sets flags.
   3397   DeoptimizeIf(negative, instr->environment());
   3398   __ bind(&is_positive);
   3399 }
   3400 
   3401 
   3402 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3403   // Class for deferred case.
   3404   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   3405    public:
   3406     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3407         : LDeferredCode(codegen), instr_(instr) { }
   3408     virtual void Generate() {
   3409       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3410     }
   3411     virtual LInstruction* instr() { return instr_; }
   3412    private:
   3413     LMathAbs* instr_;
   3414   };
   3415 
   3416   ASSERT(instr->value()->Equals(instr->result()));
   3417   Representation r = instr->hydrogen()->value()->representation();
   3418 
   3419   if (r.IsDouble()) {
   3420     XMMRegister scratch = xmm0;
   3421     XMMRegister input_reg = ToDoubleRegister(instr->value());
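             // 0.0 - x flips only the sign bit, so ANDing the bit patterns
             // of x and -x clears the sign and leaves |x|.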
   3422     __ xorps(scratch, scratch);
   3423     __ subsd(scratch, input_reg);
   3424     __ andpd(input_reg, scratch);
   3425   } else if (r.IsInteger32()) {
   3426     EmitIntegerMathAbs(instr);
   3427   } else if (r.IsSmi()) {
   3428     EmitSmiMathAbs(instr);
   3429   } else {  // Tagged case.
   3430     DeferredMathAbsTaggedHeapNumber* deferred =
   3431         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3432     Register input_reg = ToRegister(instr->value());
   3433     // Smi check.
   3434     __ JumpIfNotSmi(input_reg, deferred->entry());
   3435     EmitSmiMathAbs(instr);
   3436     __ bind(deferred->exit());
   3437   }
   3438 }
   3439 
   3440 
   3441 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3442   XMMRegister xmm_scratch = xmm0;
   3443   Register output_reg = ToRegister(instr->result());
   3444   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3445 
   3446   if (CpuFeatures::IsSupported(SSE4_1)) {
   3447     CpuFeatureScope scope(masm(), SSE4_1);
   3448     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3449       // Deoptimize if minus zero.
   3450       __ movq(output_reg, input_reg);
   3451       __ subq(output_reg, Immediate(1));
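               // Only kMinInt64, the bit pattern of -0.0, overflows when
               // decremented, so the check below catches exactly minus zero.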
   3452       DeoptimizeIf(overflow, instr->environment());
   3453     }
   3454     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
   3455     __ cvttsd2si(output_reg, xmm_scratch);
   3456     __ cmpl(output_reg, Immediate(0x80000000));
   3457     DeoptimizeIf(equal, instr->environment());
   3458   } else {
   3459     Label negative_sign, done;
   3460     // Deoptimize on unordered.
   3461     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
   3462     __ ucomisd(input_reg, xmm_scratch);
   3463     DeoptimizeIf(parity_even, instr->environment());
   3464     __ j(below, &negative_sign, Label::kNear);
   3465 
   3466     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3467       // Check for negative zero.
   3468       Label positive_sign;
   3469       __ j(above, &positive_sign, Label::kNear);
   3470       __ movmskpd(output_reg, input_reg);
   3471       __ testq(output_reg, Immediate(1));
   3472       DeoptimizeIf(not_zero, instr->environment());
   3473       __ Set(output_reg, 0);
   3474       __ jmp(&done);
   3475       __ bind(&positive_sign);
   3476     }
   3477 
   3478     // Use truncating instruction (OK because input is positive).
   3479     __ cvttsd2si(output_reg, input_reg);
   3480     // Overflow is signalled with minint.
   3481     __ cmpl(output_reg, Immediate(0x80000000));
   3482     DeoptimizeIf(equal, instr->environment());
   3483     __ jmp(&done, Label::kNear);
   3484 
   3485     // Non-zero negative reaches here.
   3486     __ bind(&negative_sign);
   3487     // Truncate, then compare and compensate.
   3488     __ cvttsd2si(output_reg, input_reg);
   3489     __ cvtlsi2sd(xmm_scratch, output_reg);
   3490     __ ucomisd(input_reg, xmm_scratch);
   3491     __ j(equal, &done, Label::kNear);
   3492     __ subl(output_reg, Immediate(1));
   3493     DeoptimizeIf(overflow, instr->environment());
   3494 
   3495     __ bind(&done);
   3496   }
   3497 }
   3498 
   3499 
   3500 void LCodeGen::DoMathRound(LMathRound* instr) {
   3501   const XMMRegister xmm_scratch = xmm0;
   3502   Register output_reg = ToRegister(instr->result());
   3503   XMMRegister input_reg = ToDoubleRegister(instr->value());
    3504   static const int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
    3505   static const int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
   3506 
   3507   Label done, round_to_zero, below_one_half, do_not_compensate, restore;
   3508   __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
   3509   __ movq(xmm_scratch, kScratchRegister);
   3510   __ ucomisd(xmm_scratch, input_reg);
   3511   __ j(above, &below_one_half);
   3512 
    3513   // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(x + 0.5).
   3514   __ addsd(xmm_scratch, input_reg);
   3515   __ cvttsd2si(output_reg, xmm_scratch);
   3516   // Overflow is signalled with minint.
   3517   __ cmpl(output_reg, Immediate(0x80000000));
   3518   __ RecordComment("D2I conversion overflow");
   3519   DeoptimizeIf(equal, instr->environment());
   3520   __ jmp(&done);
   3521 
   3522   __ bind(&below_one_half);
   3523   __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
   3524   __ movq(xmm_scratch, kScratchRegister);
   3525   __ ucomisd(xmm_scratch, input_reg);
   3526   __ j(below_equal, &round_to_zero);
   3527 
    3528   // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
    3529   // compare and compensate.
   3530   __ movq(kScratchRegister, input_reg);  // Back up input_reg.
   3531   __ subsd(input_reg, xmm_scratch);
   3532   __ cvttsd2si(output_reg, input_reg);
   3533   // Catch minint due to overflow, and to prevent overflow when compensating.
   3534   __ cmpl(output_reg, Immediate(0x80000000));
   3535   __ RecordComment("D2I conversion overflow");
   3536   DeoptimizeIf(equal, instr->environment());
   3537 
   3538   __ cvtlsi2sd(xmm_scratch, output_reg);
   3539   __ ucomisd(input_reg, xmm_scratch);
   3540   __ j(equal, &restore, Label::kNear);
   3541   __ subl(output_reg, Immediate(1));
   3542   // No overflow because we already ruled out minint.
   3543   __ bind(&restore);
   3544   __ movq(input_reg, kScratchRegister);  // Restore input_reg.
   3545   __ jmp(&done);
   3546 
   3547   __ bind(&round_to_zero);
   3548   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
   3549   // we can ignore the difference between a result of -0 and +0.
   3550   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3551     __ movq(output_reg, input_reg);
   3552     __ testq(output_reg, output_reg);
   3553     __ RecordComment("Minus zero");
   3554     DeoptimizeIf(negative, instr->environment());
   3555   }
   3556   __ Set(output_reg, 0);
   3557   __ bind(&done);
   3558 }
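         // Illustrative sketch (not part of the original source): every path
         // above computes floor(x + 0.5) as a 32-bit integer:
         //   if (x >= 0.5)       r = trunc(x + 0.5);    // trunc == floor here
         //   else if (x >= -0.5) r = 0;                 // -0 may deoptimize
         //   else { y = x + 0.5; r = trunc(y);          // trunc == ceil, y < 0
         //          if (double(r) != y) r -= 1; }       // adjust down to floor
         // with a deopt whenever a conversion overflows to kMinInt.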
   3559 
   3560 
   3561 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3562   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3563   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   3564   __ sqrtsd(input_reg, input_reg);
   3565 }
   3566 
   3567 
   3568 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3569   XMMRegister xmm_scratch = xmm0;
   3570   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3571   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   3572 
   3573   // Note that according to ECMA-262 15.8.2.13:
   3574   // Math.pow(-Infinity, 0.5) == Infinity
   3575   // Math.sqrt(-Infinity) == NaN
   3576   Label done, sqrt;
   3577   // Check base for -Infinity.  According to IEEE-754, double-precision
   3578   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
   3579   __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
   3580   __ movq(xmm_scratch, kScratchRegister);
   3581   __ ucomisd(xmm_scratch, input_reg);
   3582   // Comparing -Infinity with NaN results in "unordered", which sets the
   3583   // zero flag as if both were equal.  However, it also sets the carry flag.
   3584   __ j(not_equal, &sqrt, Label::kNear);
   3585   __ j(carry, &sqrt, Label::kNear);
   3586   // If input is -Infinity, return Infinity.
   3587   __ xorps(input_reg, input_reg);
   3588   __ subsd(input_reg, xmm_scratch);
   3589   __ jmp(&done, Label::kNear);
   3590 
   3591   // Square root.
   3592   __ bind(&sqrt);
   3593   __ xorps(xmm_scratch, xmm_scratch);
   3594   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   3595   __ sqrtsd(input_reg, input_reg);
   3596   __ bind(&done);
   3597 }
   3598 
   3599 
   3600 void LCodeGen::DoPower(LPower* instr) {
   3601   Representation exponent_type = instr->hydrogen()->right()->representation();
   3602   // Having marked this as a call, we can use any registers.
   3603   // Just make sure that the input/output registers are the expected ones.
   3604 
   3605   Register exponent = rdx;
   3606   ASSERT(!instr->right()->IsRegister() ||
   3607          ToRegister(instr->right()).is(exponent));
   3608   ASSERT(!instr->right()->IsDoubleRegister() ||
   3609          ToDoubleRegister(instr->right()).is(xmm1));
   3610   ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
   3611   ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
   3612 
   3613   if (exponent_type.IsSmi()) {
   3614     MathPowStub stub(MathPowStub::TAGGED);
   3615     __ CallStub(&stub);
   3616   } else if (exponent_type.IsTagged()) {
   3617     Label no_deopt;
   3618     __ JumpIfSmi(exponent, &no_deopt);
   3619     __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
   3620     DeoptimizeIf(not_equal, instr->environment());
   3621     __ bind(&no_deopt);
   3622     MathPowStub stub(MathPowStub::TAGGED);
   3623     __ CallStub(&stub);
   3624   } else if (exponent_type.IsInteger32()) {
   3625     MathPowStub stub(MathPowStub::INTEGER);
   3626     __ CallStub(&stub);
   3627   } else {
   3628     ASSERT(exponent_type.IsDouble());
   3629     MathPowStub stub(MathPowStub::DOUBLE);
   3630     __ CallStub(&stub);
   3631   }
   3632 }
   3633 
   3634 
   3635 void LCodeGen::DoRandom(LRandom* instr) {
   3636   class DeferredDoRandom: public LDeferredCode {
   3637    public:
   3638     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
   3639         : LDeferredCode(codegen), instr_(instr) { }
   3640     virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
   3641     virtual LInstruction* instr() { return instr_; }
   3642    private:
   3643     LRandom* instr_;
   3644   };
   3645 
   3646   DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
   3647 
    3648   // Having marked this instruction as a call, we can use any
   3649   // registers.
   3650   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3651 
   3652   // Choose the right register for the first argument depending on
    3653   // the calling convention.
   3654 #ifdef _WIN64
   3655   ASSERT(ToRegister(instr->global_object()).is(rcx));
   3656   Register global_object = rcx;
   3657 #else
   3658   ASSERT(ToRegister(instr->global_object()).is(rdi));
   3659   Register global_object = rdi;
   3660 #endif
   3661 
   3662   static const int kSeedSize = sizeof(uint32_t);
   3663   STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
   3664 
   3665   __ movq(global_object,
   3666           FieldOperand(global_object, GlobalObject::kNativeContextOffset));
   3667   static const int kRandomSeedOffset =
   3668       FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
   3669   __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
    3670   // rbx: ByteArray holding the native context's random seeds
   3671 
   3672   // Load state[0].
   3673   __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
   3674   // If state[0] == 0, call runtime to initialize seeds.
   3675   __ testl(rax, rax);
   3676   __ j(zero, deferred->entry());
   3677   // Load state[1].
   3678   __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
   3679 
   3680   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
   3681   // Only operate on the lower 32 bit of rax.
   3682   __ movzxwl(rdx, rax);
   3683   __ imull(rdx, rdx, Immediate(18273));
   3684   __ shrl(rax, Immediate(16));
   3685   __ addl(rax, rdx);
   3686   // Save state[0].
   3687   __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
   3688 
   3689   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
   3690   __ movzxwl(rdx, rcx);
   3691   __ imull(rdx, rdx, Immediate(36969));
   3692   __ shrl(rcx, Immediate(16));
   3693   __ addl(rcx, rdx);
   3694   // Save state[1].
   3695   __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
   3696 
   3697   // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
   3698   __ shll(rax, Immediate(14));
   3699   __ andl(rcx, Immediate(0x3FFFF));
   3700   __ addl(rax, rcx);
   3701 
   3702   __ bind(deferred->exit());
   3703   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   3704   // by computing:
    3705   // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
   3706   __ movq(rcx, V8_INT64_C(0x4130000000000000),
   3707           RelocInfo::NONE64);  // 1.0 x 2^20 as double
   3708   __ movq(xmm2, rcx);
   3709   __ movd(xmm1, rax);
   3710   __ xorps(xmm1, xmm2);
   3711   __ subsd(xmm1, xmm2);
   3712 }
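         // Illustrative sketch (not part of the original source): the state
         // update above is a pair of 16-bit multiply-with-carry generators.
         // Assuming plain uint32_t state words, the equivalent C++ is
         //   uint32_t Next(uint32_t* s0, uint32_t* s1) {
         //     *s0 = 18273 * (*s0 & 0xFFFF) + (*s0 >> 16);
         //     *s1 = 36969 * (*s1 & 0xFFFF) + (*s1 >> 16);
         //     return (*s0 << 14) + (*s1 & 0x3FFFF);
         //   }
         // The final bit trick ORs the 32 random bits into the mantissa of
         // 1.0 x 2^20 and subtracts 1.0 x 2^20, yielding a double in [0, 1).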
   3713 
   3714 
   3715 void LCodeGen::DoDeferredRandom(LRandom* instr) {
   3716   __ PrepareCallCFunction(1);
   3717   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
   3718   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3719   // Return value is in rax.
   3720 }
   3721 
   3722 
   3723 void LCodeGen::DoMathExp(LMathExp* instr) {
   3724   XMMRegister input = ToDoubleRegister(instr->value());
   3725   XMMRegister result = ToDoubleRegister(instr->result());
   3726   Register temp1 = ToRegister(instr->temp1());
   3727   Register temp2 = ToRegister(instr->temp2());
   3728 
   3729   MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
   3730 }
   3731 
   3732 
   3733 void LCodeGen::DoMathLog(LMathLog* instr) {
   3734   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3735   TranscendentalCacheStub stub(TranscendentalCache::LOG,
   3736                                TranscendentalCacheStub::UNTAGGED);
   3737   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3738 }
   3739 
   3740 
   3741 void LCodeGen::DoMathTan(LMathTan* instr) {
   3742   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3743   TranscendentalCacheStub stub(TranscendentalCache::TAN,
   3744                                TranscendentalCacheStub::UNTAGGED);
   3745   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3746 }
   3747 
   3748 
   3749 void LCodeGen::DoMathCos(LMathCos* instr) {
   3750   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3751   TranscendentalCacheStub stub(TranscendentalCache::COS,
   3752                                TranscendentalCacheStub::UNTAGGED);
   3753   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3754 }
   3755 
   3756 
   3757 void LCodeGen::DoMathSin(LMathSin* instr) {
   3758   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   3759   TranscendentalCacheStub stub(TranscendentalCache::SIN,
   3760                                TranscendentalCacheStub::UNTAGGED);
   3761   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3762 }
   3763 
   3764 
   3765 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3766   ASSERT(ToRegister(instr->function()).is(rdi));
   3767   ASSERT(instr->HasPointerMap());
   3768 
   3769   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3770   if (known_function.is_null()) {
   3771     LPointerMap* pointers = instr->pointer_map();
   3772     RecordPosition(pointers->position());
   3773     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3774     ParameterCount count(instr->arity());
   3775     __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
   3776     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3777   } else {
   3778     CallKnownFunction(known_function,
   3779                       instr->hydrogen()->formal_parameter_count(),
   3780                       instr->arity(),
   3781                       instr,
   3782                       CALL_AS_METHOD,
   3783                       RDI_CONTAINS_TARGET);
   3784   }
   3785 }
   3786 
   3787 
   3788 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   3789   ASSERT(ToRegister(instr->key()).is(rcx));
   3790   ASSERT(ToRegister(instr->result()).is(rax));
   3791 
   3792   int arity = instr->arity();
   3793   Handle<Code> ic =
   3794       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   3795   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3796   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3797 }
   3798 
   3799 
   3800 void LCodeGen::DoCallNamed(LCallNamed* instr) {
   3801   ASSERT(ToRegister(instr->result()).is(rax));
   3802 
   3803   int arity = instr->arity();
   3804   RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
   3805   Handle<Code> ic =
   3806       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   3807   __ Move(rcx, instr->name());
   3808   CallCode(ic, mode, instr);
   3809   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3810 }
   3811 
   3812 
   3813 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3814   ASSERT(ToRegister(instr->function()).is(rdi));
   3815   ASSERT(ToRegister(instr->result()).is(rax));
   3816 
   3817   int arity = instr->arity();
   3818   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   3819   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   3820   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3821 }
   3822 
   3823 
   3824 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
   3825   ASSERT(ToRegister(instr->result()).is(rax));
   3826   int arity = instr->arity();
   3827   RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
   3828   Handle<Code> ic =
   3829       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   3830   __ Move(rcx, instr->name());
   3831   CallCode(ic, mode, instr);
   3832   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   3833 }
   3834 
   3835 
   3836 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   3837   ASSERT(ToRegister(instr->result()).is(rax));
   3838   CallKnownFunction(instr->hydrogen()->target(),
   3839                     instr->hydrogen()->formal_parameter_count(),
   3840                     instr->arity(),
   3841                     instr,
   3842                     CALL_AS_FUNCTION,
   3843                     RDI_UNINITIALIZED);
   3844 }
   3845 
   3846 
   3847 void LCodeGen::DoCallNew(LCallNew* instr) {
   3848   ASSERT(ToRegister(instr->constructor()).is(rdi));
   3849   ASSERT(ToRegister(instr->result()).is(rax));
   3850 
   3851   __ Set(rax, instr->arity());
    3852   // No cell in rbx for construct type feedback in optimized code.
   3853   Handle<Object> undefined_value(isolate()->factory()->undefined_value());
   3854   __ Move(rbx, undefined_value);
   3855   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   3856   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   3857 }
   3858 
   3859 
   3860 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3861   ASSERT(ToRegister(instr->constructor()).is(rdi));
   3862   ASSERT(ToRegister(instr->result()).is(rax));
   3863 
   3864   __ Set(rax, instr->arity());
   3865   __ Move(rbx, instr->hydrogen()->property_cell());
   3866   ElementsKind kind = instr->hydrogen()->elements_kind();
   3867   AllocationSiteOverrideMode override_mode =
   3868       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3869           ? DISABLE_ALLOCATION_SITES
   3870           : DONT_OVERRIDE;
   3871   ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
   3872 
   3873   if (instr->arity() == 0) {
   3874     ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
   3875     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   3876   } else if (instr->arity() == 1) {
   3877     Label done;
   3878     if (IsFastPackedElementsKind(kind)) {
   3879       Label packed_case;
    3880       // A single nonzero length argument creates holes, so we might
    3881       // need the holey kind here; look at the first argument.
   3882       __ movq(rcx, Operand(rsp, 0));
   3883       __ testq(rcx, rcx);
   3884       __ j(zero, &packed_case);
   3885 
   3886       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   3887       ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
   3888                                               override_mode);
   3889       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   3890       __ jmp(&done);
   3891       __ bind(&packed_case);
   3892     }
   3893 
   3894     ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
   3895     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   3896     __ bind(&done);
   3897   } else {
   3898     ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
   3899     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   3900   }
   3901 }
   3902 
   3903 
   3904 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3905   CallRuntime(instr->function(), instr->arity(), instr);
   3906 }
   3907 
   3908 
   3909 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3910   Register result = ToRegister(instr->result());
   3911   Register base = ToRegister(instr->base_object());
   3912   __ lea(result, Operand(base, instr->offset()));
   3913 }
   3914 
   3915 
   3916 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3917   Representation representation = instr->representation();
   3918 
   3919   HObjectAccess access = instr->hydrogen()->access();
   3920   int offset = access.offset();
   3921 
   3922   if (access.IsExternalMemory()) {
   3923     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   3924     Register value = ToRegister(instr->value());
   3925     if (instr->object()->IsConstantOperand()) {
   3926       ASSERT(value.is(rax));
   3927       LConstantOperand* object = LConstantOperand::cast(instr->object());
   3928       __ store_rax(ToExternalReference(object));
   3929     } else {
   3930       Register object = ToRegister(instr->object());
   3931       __ movq(MemOperand(object, offset), value);
   3932     }
   3933     return;
   3934   }
   3935 
   3936   Register object = ToRegister(instr->object());
   3937   Handle<Map> transition = instr->transition();
   3938 
   3939   if (FLAG_track_fields && representation.IsSmi()) {
   3940     if (instr->value()->IsConstantOperand()) {
   3941       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   3942       if (!IsSmiConstant(operand_value)) {
   3943         DeoptimizeIf(no_condition, instr->environment());
   3944       }
   3945     }
   3946   } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
   3947     if (instr->value()->IsConstantOperand()) {
   3948       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   3949       if (IsInteger32Constant(operand_value)) {
   3950         DeoptimizeIf(no_condition, instr->environment());
   3951       }
   3952     } else {
   3953       if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   3954         Register value = ToRegister(instr->value());
   3955         Condition cc = masm()->CheckSmi(value);
   3956         DeoptimizeIf(cc, instr->environment());
   3957       }
   3958     }
   3959   } else if (FLAG_track_double_fields && representation.IsDouble()) {
   3960     ASSERT(transition.is_null());
   3961     ASSERT(access.IsInobject());
   3962     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   3963     XMMRegister value = ToDoubleRegister(instr->value());
   3964     __ movsd(FieldOperand(object, offset), value);
   3965     return;
   3966   }
   3967 
   3968   if (!transition.is_null()) {
   3969     if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
   3970       __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
   3971     } else {
   3972       Register temp = ToRegister(instr->temp());
   3973       __ Move(kScratchRegister, transition);
   3974       __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
   3975       // Update the write barrier for the map field.
   3976       __ RecordWriteField(object,
   3977                           HeapObject::kMapOffset,
   3978                           kScratchRegister,
   3979                           temp,
   3980                           kSaveFPRegs,
   3981                           OMIT_REMEMBERED_SET,
   3982                           OMIT_SMI_CHECK);
   3983     }
   3984   }
   3985 
   3986   // Do the store.
   3987   SmiCheck check_needed =
   3988       instr->hydrogen()->value()->IsHeapObject()
   3989           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   3990 
   3991   Register write_register = object;
   3992   if (!access.IsInobject()) {
   3993     write_register = ToRegister(instr->temp());
   3994     __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
   3995   }
   3996 
   3997   if (instr->value()->IsConstantOperand()) {
   3998     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   3999     if (operand_value->IsRegister()) {
   4000       __ movq(FieldOperand(write_register, offset),
   4001               ToRegister(operand_value));
   4002     } else {
   4003       Handle<Object> handle_value = ToHandle(operand_value);
   4004       ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4005       __ Move(FieldOperand(write_register, offset), handle_value);
   4006     }
   4007   } else {
   4008     __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
   4009   }
   4010 
   4011   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4012     Register value = ToRegister(instr->value());
   4013     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    4014     // Update the write barrier for the stored field.
   4015     __ RecordWriteField(write_register,
   4016                         offset,
   4017                         value,
   4018                         temp,
   4019                         kSaveFPRegs,
   4020                         EMIT_REMEMBERED_SET,
   4021                         check_needed);
   4022   }
   4023 }
   4024 
   4025 
   4026 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4027   ASSERT(ToRegister(instr->object()).is(rdx));
   4028   ASSERT(ToRegister(instr->value()).is(rax));
   4029 
   4030   __ Move(rcx, instr->hydrogen()->name());
   4031   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   4032       ? isolate()->builtins()->StoreIC_Initialize_Strict()
   4033       : isolate()->builtins()->StoreIC_Initialize();
   4034   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4035 }
   4036 
   4037 
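         // If --debug-code is on and the bounds check was statically proven
         // unnecessary, verify it anyway and trap (int3) on failure instead
         // of deoptimizing; otherwise a failing check deoptimizes as usual.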
   4038 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
   4039   if (FLAG_debug_code && check->hydrogen()->skip_check()) {
   4040     Label done;
   4041     __ j(NegateCondition(cc), &done, Label::kNear);
   4042     __ int3();
   4043     __ bind(&done);
   4044   } else {
   4045     DeoptimizeIf(cc, check->environment());
   4046   }
   4047 }
   4048 
   4049 
   4050 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   4051   if (instr->hydrogen()->skip_check()) return;
   4052 
   4053   if (instr->length()->IsRegister()) {
   4054     Register reg = ToRegister(instr->length());
   4055     if (!instr->hydrogen()->length()->representation().IsSmi()) {
   4056       __ AssertZeroExtended(reg);
   4057     }
   4058     if (instr->index()->IsConstantOperand()) {
   4059       int32_t constant_index =
   4060           ToInteger32(LConstantOperand::cast(instr->index()));
   4061       if (instr->hydrogen()->length()->representation().IsSmi()) {
   4062         __ Cmp(reg, Smi::FromInt(constant_index));
   4063       } else {
   4064         __ cmpq(reg, Immediate(constant_index));
   4065       }
   4066     } else {
   4067       Register reg2 = ToRegister(instr->index());
   4068       if (!instr->hydrogen()->index()->representation().IsSmi()) {
   4069         __ AssertZeroExtended(reg2);
   4070       }
   4071       __ cmpq(reg, reg2);
   4072     }
   4073   } else {
   4074     Operand length = ToOperand(instr->length());
   4075     if (instr->index()->IsConstantOperand()) {
   4076       int32_t constant_index =
   4077           ToInteger32(LConstantOperand::cast(instr->index()));
   4078       if (instr->hydrogen()->length()->representation().IsSmi()) {
   4079         __ Cmp(length, Smi::FromInt(constant_index));
   4080       } else {
   4081         __ cmpq(length, Immediate(constant_index));
   4082       }
   4083     } else {
   4084       __ cmpq(length, ToRegister(instr->index()));
   4085     }
   4086   }
   4087   Condition condition =
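           // Each comparison above computes length - index, so the check
           // fails when length <= index, or only when length < index if
           // index == length is explicitly allowed.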
   4088       instr->hydrogen()->allow_equality() ? below : below_equal;
   4089   ApplyCheckIf(condition, instr);
   4090 }
   4091 
   4092 
   4093 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4094   ElementsKind elements_kind = instr->elements_kind();
   4095   LOperand* key = instr->key();
   4096   if (!key->IsConstantOperand()) {
   4097     Register key_reg = ToRegister(key);
   4098     // Even though the HLoad/StoreKeyedFastElement instructions force
   4099     // the input representation for the key to be an integer, the input
    4100     // gets replaced during bounds check elimination with the index
   4101     // argument to the bounds check, which can be tagged, so that case
   4102     // must be handled here, too.
   4103     if (instr->hydrogen()->IsDehoisted()) {
    4104       // Sign-extend the key because it could be a 32-bit negative value
    4105       // and the dehoisted address computation happens in 64 bits.
   4106       __ movsxlq(key_reg, key_reg);
   4107     }
   4108   }
   4109   Operand operand(BuildFastArrayOperand(
   4110       instr->elements(),
   4111       key,
   4112       elements_kind,
   4113       0,
   4114       instr->additional_index()));
   4115 
   4116   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   4117     XMMRegister value(ToDoubleRegister(instr->value()));
   4118     __ cvtsd2ss(value, value);
   4119     __ movss(operand, value);
   4120   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   4121     __ movsd(operand, ToDoubleRegister(instr->value()));
   4122   } else {
   4123     Register value(ToRegister(instr->value()));
   4124     switch (elements_kind) {
   4125       case EXTERNAL_PIXEL_ELEMENTS:
   4126       case EXTERNAL_BYTE_ELEMENTS:
   4127       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   4128         __ movb(operand, value);
   4129         break;
   4130       case EXTERNAL_SHORT_ELEMENTS:
   4131       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   4132         __ movw(operand, value);
   4133         break;
   4134       case EXTERNAL_INT_ELEMENTS:
   4135       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   4136         __ movl(operand, value);
   4137         break;
   4138       case EXTERNAL_FLOAT_ELEMENTS:
   4139       case EXTERNAL_DOUBLE_ELEMENTS:
   4140       case FAST_ELEMENTS:
   4141       case FAST_SMI_ELEMENTS:
   4142       case FAST_DOUBLE_ELEMENTS:
   4143       case FAST_HOLEY_ELEMENTS:
   4144       case FAST_HOLEY_SMI_ELEMENTS:
   4145       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4146       case DICTIONARY_ELEMENTS:
   4147       case NON_STRICT_ARGUMENTS_ELEMENTS:
   4148         UNREACHABLE();
   4149         break;
   4150     }
   4151   }
   4152 }
   4153 
   4154 
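         // Stores to FixedDoubleArray backing stores. For canonicalization,
         // the value is compared against itself: ucomisd sets the parity
         // flag only for an unordered result, i.e. when the value is NaN, so
         // the parity_odd branch below skips canonicalization for ordinary
         // numbers. Every NaN is rewritten to one canonical bit pattern so
         // that a stored double can never alias the hole NaN that marks
         // holes in holey double arrays.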
   4155 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4156   XMMRegister value = ToDoubleRegister(instr->value());
   4157   LOperand* key = instr->key();
   4158   if (!key->IsConstantOperand()) {
   4159     Register key_reg = ToRegister(key);
    4160     // Even though the HLoad/StoreKeyedFastElement instructions force
    4161     // the input representation for the key to be an integer, the
    4162     // input gets replaced during bounds check elimination with the index
    4163     // argument to the bounds check, which can be tagged, so that case
    4164     // must be handled here, too.
    4165     if (instr->hydrogen()->IsDehoisted()) {
    4166       // Sign-extend the key because it could be a 32-bit negative value
    4167       // and the dehoisted address computation happens in 64 bits.
    4168       __ movsxlq(key_reg, key_reg);
   4169     }
   4170   }
   4171 
   4172   if (instr->NeedsCanonicalization()) {
   4173     Label have_value;
   4174 
   4175     __ ucomisd(value, value);
    4176     __ j(parity_odd, &have_value);  // Not NaN, so already canonical.
   4177 
   4178     __ Set(kScratchRegister, BitCast<uint64_t>(
   4179         FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
   4180     __ movq(value, kScratchRegister);
   4181 
   4182     __ bind(&have_value);
   4183   }
   4184 
   4185   Operand double_store_operand = BuildFastArrayOperand(
   4186       instr->elements(),
   4187       key,
   4188       FAST_DOUBLE_ELEMENTS,
   4189       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
   4190       instr->additional_index());
   4191 
   4192   __ movsd(double_store_operand, value);
   4193 }
   4194 
   4195 
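         // Stores to FixedArray backing stores. Integer constants are stored
         // as smis, other constants via their handles, and everything else
         // from a register. When a write barrier is needed, the element
         // address is materialized with lea into the no-longer-needed key
         // register and RecordWrite updates the remembered set; the inline
         // smi check is omitted if the value is statically known to be a
         // heap object.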
   4196 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4197   Register elements = ToRegister(instr->elements());
   4198   LOperand* key = instr->key();
   4199   if (!key->IsConstantOperand()) {
   4200     Register key_reg = ToRegister(key);
    4201     // Even though the HLoad/StoreKeyedFastElement instructions force
    4202     // the input representation for the key to be an integer, the
    4203     // input gets replaced during bounds check elimination with the index
    4204     // argument to the bounds check, which can be tagged, so that case
    4205     // must be handled here, too.
    4206     if (instr->hydrogen()->IsDehoisted()) {
    4207       // Sign-extend the key because it could be a 32-bit negative value
    4208       // and the dehoisted address computation happens in 64 bits.
    4209       __ movsxlq(key_reg, key_reg);
   4210     }
   4211   }
   4212 
   4213   Operand operand =
   4214       BuildFastArrayOperand(instr->elements(),
   4215                             key,
   4216                             FAST_ELEMENTS,
   4217                             FixedArray::kHeaderSize - kHeapObjectTag,
   4218                             instr->additional_index());
   4219   if (instr->value()->IsRegister()) {
   4220     __ movq(operand, ToRegister(instr->value()));
   4221   } else {
   4222     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4223     if (IsInteger32Constant(operand_value)) {
   4224       Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
   4225       __ Move(operand, smi_value);
   4226     } else {
   4227       Handle<Object> handle_value = ToHandle(operand_value);
   4228       __ Move(operand, handle_value);
   4229     }
   4230   }
   4231 
   4232   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4233     ASSERT(instr->value()->IsRegister());
   4234     Register value = ToRegister(instr->value());
   4235     ASSERT(!instr->key()->IsConstantOperand());
   4236     SmiCheck check_needed =
   4237         instr->hydrogen()->value()->IsHeapObject()
   4238             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    4239     // Compute the address of the modified element into the key register.
   4240     Register key_reg(ToRegister(key));
   4241     __ lea(key_reg, operand);
   4242     __ RecordWrite(elements,
   4243                    key_reg,
   4244                    value,
   4245                    kSaveFPRegs,
   4246                    EMIT_REMEMBERED_SET,
   4247                    check_needed);
   4248   }
   4249 }
   4250 
   4251 
   4252 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4253   if (instr->is_external()) {
   4254     DoStoreKeyedExternalArray(instr);
   4255   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4256     DoStoreKeyedFixedDoubleArray(instr);
   4257   } else {
   4258     DoStoreKeyedFixedArray(instr);
   4259   }
   4260 }
   4261 
   4262 
   4263 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4264   ASSERT(ToRegister(instr->object()).is(rdx));
   4265   ASSERT(ToRegister(instr->key()).is(rcx));
   4266   ASSERT(ToRegister(instr->value()).is(rax));
   4267 
   4268   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   4269       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
   4270       : isolate()->builtins()->KeyedStoreIC_Initialize();
   4271   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4272 }
   4273 
   4274 
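         // Transitions an object's elements kind. A simple transition just
         // installs the new map (with a write barrier on the map field);
         // transitions that must reshape the backing store go through
         // TransitionElementsKindStub, with a safepoint recorded because the
         // stub may allocate.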
   4275 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4276   Register object_reg = ToRegister(instr->object());
   4277 
   4278   Handle<Map> from_map = instr->original_map();
   4279   Handle<Map> to_map = instr->transitioned_map();
   4280   ElementsKind from_kind = instr->from_kind();
   4281   ElementsKind to_kind = instr->to_kind();
   4282 
   4283   Label not_applicable;
   4284   __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   4285   __ j(not_equal, &not_applicable);
   4286   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4287     Register new_map_reg = ToRegister(instr->new_map_temp());
   4288     __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
   4289     __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
   4290     // Write barrier.
   4291     ASSERT_NE(instr->temp(), NULL);
   4292     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
   4293                         ToRegister(instr->temp()), kDontSaveFPRegs);
   4294   } else {
   4295     PushSafepointRegistersScope scope(this);
   4296     if (!object_reg.is(rax)) {
   4297       __ movq(rax, object_reg);
   4298     }
   4299     __ Move(rbx, to_map);
   4300     TransitionElementsKindStub stub(from_kind, to_kind);
   4301     __ CallStub(&stub);
   4302     RecordSafepointWithRegisters(
   4303         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4304   }
   4305   __ bind(&not_applicable);
   4306 }
   4307 
   4308 
   4309 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4310   Register object = ToRegister(instr->object());
   4311   Register temp = ToRegister(instr->temp());
   4312   __ TestJSArrayForAllocationMemento(object, temp);
   4313   DeoptimizeIf(equal, instr->environment());
   4314 }
   4315 
   4316 
   4317 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4318   EmitPushTaggedOperand(instr->left());
   4319   EmitPushTaggedOperand(instr->right());
   4320   StringAddStub stub(instr->hydrogen()->flags());
   4321   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4322 }
   4323 
   4324 
   4325 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4326   class DeferredStringCharCodeAt: public LDeferredCode {
   4327    public:
   4328     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4329         : LDeferredCode(codegen), instr_(instr) { }
   4330     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4331     virtual LInstruction* instr() { return instr_; }
   4332    private:
   4333     LStringCharCodeAt* instr_;
   4334   };
   4335 
   4336   DeferredStringCharCodeAt* deferred =
   4337       new(zone()) DeferredStringCharCodeAt(this, instr);
   4338 
   4339   StringCharLoadGenerator::Generate(masm(),
   4340                                     ToRegister(instr->string()),
   4341                                     ToRegister(instr->index()),
   4342                                     ToRegister(instr->result()),
   4343                                     deferred->entry());
   4344   __ bind(deferred->exit());
   4345 }
   4346 
   4347 
   4348 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4349   Register string = ToRegister(instr->string());
   4350   Register result = ToRegister(instr->result());
   4351 
   4352   // TODO(3095996): Get rid of this. For now, we need to make the
   4353   // result register contain a valid pointer because it is already
   4354   // contained in the register pointer map.
   4355   __ Set(result, 0);
   4356 
   4357   PushSafepointRegistersScope scope(this);
   4358   __ push(string);
   4359   // Push the index as a smi. This is safe because of the checks in
   4360   // DoStringCharCodeAt above.
   4361   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
   4362   if (instr->index()->IsConstantOperand()) {
   4363     int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4364     __ Push(Smi::FromInt(const_index));
   4365   } else {
   4366     Register index = ToRegister(instr->index());
   4367     __ Integer32ToSmi(index, index);
   4368     __ push(index);
   4369   }
   4370   CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   4371   __ AssertSmi(rax);
   4372   __ SmiToInteger32(rax, rax);
   4373   __ StoreToSafepointRegisterSlot(result, rax);
   4374 }
   4375 
   4376 
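         // Fast path for creating a one-character string: codes up to
         // String::kMaxOneByteCharCode are looked up in the single character
         // string cache; a larger code or a cache miss (an undefined cache
         // entry) falls through to the deferred runtime call.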
   4377 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4378   class DeferredStringCharFromCode: public LDeferredCode {
   4379    public:
   4380     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4381         : LDeferredCode(codegen), instr_(instr) { }
   4382     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   4383     virtual LInstruction* instr() { return instr_; }
   4384    private:
   4385     LStringCharFromCode* instr_;
   4386   };
   4387 
   4388   DeferredStringCharFromCode* deferred =
   4389       new(zone()) DeferredStringCharFromCode(this, instr);
   4390 
   4391   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   4392   Register char_code = ToRegister(instr->char_code());
   4393   Register result = ToRegister(instr->result());
   4394   ASSERT(!char_code.is(result));
   4395 
   4396   __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
   4397   __ j(above, deferred->entry());
   4398   __ movsxlq(char_code, char_code);
   4399   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4400   __ movq(result, FieldOperand(result,
   4401                                char_code, times_pointer_size,
   4402                                FixedArray::kHeaderSize));
   4403   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
   4404   __ j(equal, deferred->entry());
   4405   __ bind(deferred->exit());
   4406 }
   4407 
   4408 
   4409 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4410   Register char_code = ToRegister(instr->char_code());
   4411   Register result = ToRegister(instr->result());
   4412 
   4413   // TODO(3095996): Get rid of this. For now, we need to make the
   4414   // result register contain a valid pointer because it is already
   4415   // contained in the register pointer map.
   4416   __ Set(result, 0);
   4417 
   4418   PushSafepointRegistersScope scope(this);
   4419   __ Integer32ToSmi(char_code, char_code);
   4420   __ push(char_code);
   4421   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   4422   __ StoreToSafepointRegisterSlot(result, rax);
   4423 }
   4424 
   4425 
   4426 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4427   LOperand* input = instr->value();
   4428   ASSERT(input->IsRegister() || input->IsStackSlot());
   4429   LOperand* output = instr->result();
   4430   ASSERT(output->IsDoubleRegister());
   4431   if (input->IsRegister()) {
   4432     __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
   4433   } else {
   4434     __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
   4435   }
   4436 }
   4437 
   4438 
   4439 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
   4440   LOperand* input = instr->value();
   4441   ASSERT(input->IsRegister());
   4442   LOperand* output = instr->result();
   4443   __ Integer32ToSmi(ToRegister(output), ToRegister(input));
   4444   if (!instr->hydrogen()->value()->HasRange() ||
   4445       !instr->hydrogen()->value()->range()->IsInSmiRange()) {
   4446     DeoptimizeIf(overflow, instr->environment());
   4447   }
   4448 }
   4449 
   4450 
   4451 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4452   LOperand* input = instr->value();
   4453   LOperand* output = instr->result();
   4454   LOperand* temp = instr->temp();
   4455 
   4456   __ LoadUint32(ToDoubleRegister(output),
   4457                 ToRegister(input),
   4458                 ToDoubleRegister(temp));
   4459 }
   4460 
   4461 
   4462 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4463   LOperand* input = instr->value();
   4464   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   4465   Register reg = ToRegister(input);
   4466 
   4467   __ Integer32ToSmi(reg, reg);
   4468 }
   4469 
   4470 
   4471 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4472   class DeferredNumberTagU: public LDeferredCode {
   4473    public:
   4474     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4475         : LDeferredCode(codegen), instr_(instr) { }
   4476     virtual void Generate() {
   4477       codegen()->DoDeferredNumberTagU(instr_);
   4478     }
   4479     virtual LInstruction* instr() { return instr_; }
   4480    private:
   4481     LNumberTagU* instr_;
   4482   };
   4483 
   4484   LOperand* input = instr->value();
   4485   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   4486   Register reg = ToRegister(input);
   4487 
   4488   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4489   __ cmpl(reg, Immediate(Smi::kMaxValue));
   4490   __ j(above, deferred->entry());
   4491   __ Integer32ToSmi(reg, reg);
   4492   __ bind(deferred->exit());
   4493 }
   4494 
   4495 
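         // Deferred path for tagging a uint32: values above Smi::kMaxValue
         // do not fit in a smi, so a heap number is allocated instead. The
         // untagged value is parked in xmm1 so that it survives the possible
         // runtime call, and the safepoint slot of the result register is
         // given a harmless smi zero because that register is listed in the
         // pointer map while it still holds a raw integer.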
   4496 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
   4497   Label slow;
   4498   Register reg = ToRegister(instr->value());
   4499   Register tmp = reg.is(rax) ? rcx : rax;
   4500 
   4501   // Preserve the value of all registers.
   4502   PushSafepointRegistersScope scope(this);
   4503 
   4504   Label done;
    4505   // Load the value into xmm1, which will be preserved across a potential
    4506   // call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves
    4507   // only allocatable XMM registers on x64).
   4508   __ LoadUint32(xmm1, reg, xmm0);
   4509 
   4510   if (FLAG_inline_new) {
   4511     __ AllocateHeapNumber(reg, tmp, &slow);
   4512     __ jmp(&done, Label::kNear);
   4513   }
   4514 
   4515   // Slow case: Call the runtime system to do the number allocation.
   4516   __ bind(&slow);
   4517 
   4518   // Put a valid pointer value in the stack slot where the result
   4519   // register is stored, as this register is in the pointer map, but contains an
   4520   // integer value.
   4521   __ StoreToSafepointRegisterSlot(reg, Immediate(0));
   4522 
   4523   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   4524   if (!reg.is(rax)) __ movq(reg, rax);
   4525 
    4526   // Done. Store the double in xmm1 into the value field of the
    4527   // allocated heap number.
   4528   __ bind(&done);
   4529   __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
   4530   __ StoreToSafepointRegisterSlot(reg, reg);
   4531 }
   4532 
   4533 
   4534 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4535   class DeferredNumberTagD: public LDeferredCode {
   4536    public:
   4537     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4538         : LDeferredCode(codegen), instr_(instr) { }
   4539     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   4540     virtual LInstruction* instr() { return instr_; }
   4541    private:
   4542     LNumberTagD* instr_;
   4543   };
   4544 
   4545   XMMRegister input_reg = ToDoubleRegister(instr->value());
   4546   Register reg = ToRegister(instr->result());
   4547   Register tmp = ToRegister(instr->temp());
   4548 
   4549   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4550   if (FLAG_inline_new) {
   4551     __ AllocateHeapNumber(reg, tmp, deferred->entry());
   4552   } else {
   4553     __ jmp(deferred->entry());
   4554   }
   4555   __ bind(deferred->exit());
   4556   __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   4557 }
   4558 
   4559 
   4560 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4561   // TODO(3095996): Get rid of this. For now, we need to make the
   4562   // result register contain a valid pointer because it is already
   4563   // contained in the register pointer map.
   4564   Register reg = ToRegister(instr->result());
   4565   __ Move(reg, Smi::FromInt(0));
   4566 
   4567   {
   4568     PushSafepointRegistersScope scope(this);
   4569     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   4570     // Ensure that value in rax survives popping registers.
   4571     __ movq(kScratchRegister, rax);
   4572   }
   4573   __ movq(reg, kScratchRegister);
   4574 }
   4575 
   4576 
   4577 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4578   ASSERT(instr->value()->Equals(instr->result()));
   4579   Register input = ToRegister(instr->value());
   4580   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
   4581   __ Integer32ToSmi(input, input);
   4582 }
   4583 
   4584 
   4585 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4586   ASSERT(instr->value()->Equals(instr->result()));
   4587   Register input = ToRegister(instr->value());
   4588   if (instr->needs_check()) {
   4589     Condition is_smi = __ CheckSmi(input);
   4590     DeoptimizeIf(NegateCondition(is_smi), instr->environment());
   4591   } else {
   4592     __ AssertSmi(input);
   4593   }
   4594   __ SmiToInteger32(input, input);
   4595 }
   4596 
   4597 
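         // Converts a tagged value in input_reg to a double in result_reg.
         // In NUMBER_CANDIDATE_IS_ANY_TAGGED mode the input may be a smi, a
         // heap number, or (when permitted) undefined, which becomes NaN via
         // the 0/0 computation below. Minus zero is detected by comparing
         // against +0.0 and then testing the sign bit that movmskpd copies
         // into bit 0 of the scratch register.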
   4598 void LCodeGen::EmitNumberUntagD(Register input_reg,
   4599                                 XMMRegister result_reg,
   4600                                 bool can_convert_undefined_to_nan,
   4601                                 bool deoptimize_on_minus_zero,
   4602                                 LEnvironment* env,
   4603                                 NumberUntagDMode mode) {
   4604   Label load_smi, done;
   4605 
   4606   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4607     // Smi check.
   4608     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
   4609 
   4610     // Heap number map check.
   4611     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
   4612                    Heap::kHeapNumberMapRootIndex);
   4613     if (!can_convert_undefined_to_nan) {
   4614       DeoptimizeIf(not_equal, env);
   4615     } else {
   4616       Label heap_number, convert;
   4617       __ j(equal, &heap_number, Label::kNear);
   4618 
   4619       // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
   4620       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
   4621       DeoptimizeIf(not_equal, env);
   4622 
   4623       __ bind(&convert);
   4624       __ xorps(result_reg, result_reg);
   4625       __ divsd(result_reg, result_reg);
   4626       __ jmp(&done, Label::kNear);
   4627 
   4628       __ bind(&heap_number);
   4629     }
   4630     // Heap number to XMM conversion.
   4631     __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
   4632     if (deoptimize_on_minus_zero) {
   4633       XMMRegister xmm_scratch = xmm0;
   4634       __ xorps(xmm_scratch, xmm_scratch);
   4635       __ ucomisd(xmm_scratch, result_reg);
   4636       __ j(not_equal, &done, Label::kNear);
   4637       __ movmskpd(kScratchRegister, result_reg);
   4638       __ testq(kScratchRegister, Immediate(1));
   4639       DeoptimizeIf(not_zero, env);
   4640     }
   4641     __ jmp(&done, Label::kNear);
   4642   } else {
   4643     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   4644   }
   4645 
    4646   // Smi to XMM conversion.
   4647   __ bind(&load_smi);
   4648   __ SmiToInteger32(kScratchRegister, input_reg);
   4649   __ cvtlsi2sd(result_reg, kScratchRegister);
   4650   __ bind(&done);
   4651 }
   4652 
   4653 
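         // Deferred tagged-to-int32 conversion for non-smi inputs. In the
         // truncating case, cvttsd2siq produces the 64-bit "integer
         // indefinite" value 0x8000000000000000 on overflow or NaN, which is
         // exactly the pattern the explicit compare below deoptimizes on.
         // The non-truncating case converts back with cvtlsi2sd and
         // deoptimizes unless the round trip reproduces the original double.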
   4654 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4655   Label done, heap_number;
   4656   Register input_reg = ToRegister(instr->value());
   4657 
   4658   // Heap number map check.
   4659   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
   4660                  Heap::kHeapNumberMapRootIndex);
   4661 
   4662   if (instr->truncating()) {
   4663     __ j(equal, &heap_number, Label::kNear);
   4664     // Check for undefined. Undefined is converted to zero for truncating
   4665     // conversions.
   4666     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
   4667     DeoptimizeIf(not_equal, instr->environment());
   4668     __ Set(input_reg, 0);
   4669     __ jmp(&done, Label::kNear);
   4670 
   4671     __ bind(&heap_number);
   4672 
   4673     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   4674     __ cvttsd2siq(input_reg, xmm0);
   4675     __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
   4676     __ cmpq(input_reg, kScratchRegister);
   4677     DeoptimizeIf(equal, instr->environment());
   4678   } else {
   4679     // Deoptimize if we don't have a heap number.
   4680     DeoptimizeIf(not_equal, instr->environment());
   4681 
   4682     XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
   4683     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   4684     __ cvttsd2si(input_reg, xmm0);
   4685     __ cvtlsi2sd(xmm_temp, input_reg);
   4686     __ ucomisd(xmm0, xmm_temp);
   4687     DeoptimizeIf(not_equal, instr->environment());
   4688     DeoptimizeIf(parity_even, instr->environment());  // NaN.
   4689     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4690       __ testl(input_reg, input_reg);
   4691       __ j(not_zero, &done);
   4692       __ movmskpd(input_reg, xmm0);
   4693       __ andl(input_reg, Immediate(1));
   4694       DeoptimizeIf(not_zero, instr->environment());
   4695     }
   4696   }
   4697   __ bind(&done);
   4698 }
   4699 
   4700 
   4701 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4702   class DeferredTaggedToI: public LDeferredCode {
   4703    public:
   4704     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4705         : LDeferredCode(codegen), instr_(instr) { }
   4706     virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
   4707     virtual LInstruction* instr() { return instr_; }
   4708    private:
   4709     LTaggedToI* instr_;
   4710   };
   4711 
   4712   LOperand* input = instr->value();
   4713   ASSERT(input->IsRegister());
   4714   ASSERT(input->Equals(instr->result()));
   4715 
   4716   Register input_reg = ToRegister(input);
   4717   DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4718   __ JumpIfNotSmi(input_reg, deferred->entry());
   4719   __ SmiToInteger32(input_reg, input_reg);
   4720   __ bind(deferred->exit());
   4721 }
   4722 
   4723 
   4724 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4725   LOperand* input = instr->value();
   4726   ASSERT(input->IsRegister());
   4727   LOperand* result = instr->result();
   4728   ASSERT(result->IsDoubleRegister());
   4729 
   4730   Register input_reg = ToRegister(input);
   4731   XMMRegister result_reg = ToDoubleRegister(result);
   4732 
   4733   HValue* value = instr->hydrogen()->value();
   4734   NumberUntagDMode mode = value->representation().IsSmi()
   4735       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4736 
   4737   EmitNumberUntagD(input_reg, result_reg,
   4738                    instr->hydrogen()->can_convert_undefined_to_nan(),
   4739                    instr->hydrogen()->deoptimize_on_minus_zero(),
   4740                    instr->environment(),
   4741                    mode);
   4742 }
   4743 
   4744 
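         // A worked example for the non-truncating path below: for an input
         // of -0.0, cvttsd2si produces 0, converting 0 back yields +0.0, and
         // ucomisd reports equality, so only the movmskpd sign-bit test can
         // tell -0.0 apart and trigger the deopt that kBailoutOnMinusZero
         // requires.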
   4745 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4746   LOperand* input = instr->value();
   4747   ASSERT(input->IsDoubleRegister());
   4748   LOperand* result = instr->result();
   4749   ASSERT(result->IsRegister());
   4750 
   4751   XMMRegister input_reg = ToDoubleRegister(input);
   4752   Register result_reg = ToRegister(result);
   4753 
   4754   if (instr->truncating()) {
   4755     // Performs a truncating conversion of a floating point number as used by
   4756     // the JS bitwise operations.
   4757     __ cvttsd2siq(result_reg, input_reg);
   4758     __ movq(kScratchRegister,
   4759             V8_INT64_C(0x8000000000000000),
   4760             RelocInfo::NONE64);
   4761     __ cmpq(result_reg, kScratchRegister);
   4762     DeoptimizeIf(equal, instr->environment());
   4763   } else {
   4764     __ cvttsd2si(result_reg, input_reg);
   4765     __ cvtlsi2sd(xmm0, result_reg);
   4766     __ ucomisd(xmm0, input_reg);
   4767     DeoptimizeIf(not_equal, instr->environment());
   4768     DeoptimizeIf(parity_even, instr->environment());  // NaN.
   4769     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4770       Label done;
   4771       // The integer converted back is equal to the original. We
   4772       // only have to test if we got -0 as an input.
   4773       __ testl(result_reg, result_reg);
   4774       __ j(not_zero, &done, Label::kNear);
   4775       __ movmskpd(result_reg, input_reg);
   4776       // Bit 0 contains the sign of the double in input_reg.
   4777       // If input was positive, we are ok and return 0, otherwise
   4778       // deoptimize.
   4779       __ andl(result_reg, Immediate(1));
   4780       DeoptimizeIf(not_zero, instr->environment());
   4781       __ bind(&done);
   4782     }
   4783   }
   4784 }
   4785 
   4786 
   4787 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4788   LOperand* input = instr->value();
   4789   ASSERT(input->IsDoubleRegister());
   4790   LOperand* result = instr->result();
   4791   ASSERT(result->IsRegister());
   4792   CpuFeatureScope scope(masm(), SSE2);
   4793 
   4794   XMMRegister input_reg = ToDoubleRegister(input);
   4795   Register result_reg = ToRegister(result);
   4796 
   4797   Label done;
   4798   __ cvttsd2si(result_reg, input_reg);
   4799   __ cvtlsi2sd(xmm0, result_reg);
   4800   __ ucomisd(xmm0, input_reg);
   4801   DeoptimizeIf(not_equal, instr->environment());
   4802   DeoptimizeIf(parity_even, instr->environment());  // NaN.
   4803 
   4804   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4805     // The integer converted back is equal to the original. We
   4806     // only have to test if we got -0 as an input.
   4807     __ testl(result_reg, result_reg);
   4808     __ j(not_zero, &done, Label::kNear);
   4809     __ movmskpd(result_reg, input_reg);
   4810     // Bit 0 contains the sign of the double in input_reg.
   4811     // If input was positive, we are ok and return 0, otherwise
   4812     // deoptimize.
   4813     __ andl(result_reg, Immediate(1));
   4814     DeoptimizeIf(not_zero, instr->environment());
   4815     __ bind(&done);
   4816   }
   4817   __ Integer32ToSmi(result_reg, result_reg);
   4818   DeoptimizeIf(overflow, instr->environment());
   4819 }
   4820 
   4821 
   4822 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   4823   LOperand* input = instr->value();
   4824   Condition cc = masm()->CheckSmi(ToRegister(input));
   4825   DeoptimizeIf(NegateCondition(cc), instr->environment());
   4826 }
   4827 
   4828 
   4829 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   4830   if (!instr->hydrogen()->value()->IsHeapObject()) {
   4831     LOperand* input = instr->value();
   4832     Condition cc = masm()->CheckSmi(ToRegister(input));
   4833     DeoptimizeIf(cc, instr->environment());
   4834   }
   4835 }
   4836 
   4837 
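         // Checks the instance type recorded in the value's map. An interval
         // check needs at most two byte compares (just one if first == last
         // or if last is LAST_TYPE). A mask/tag check is a single testb when
         // the mask has only one bit set; e.g. an illustrative mask of 0x10
         // with a zero tag becomes
         //   testb FieldOperand(map_reg, Map::kInstanceTypeOffset), Immediate(0x10)
         // deoptimizing on not_zero. Otherwise it falls back to the
         // movzxbl/andb/cmpb sequence.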
   4838 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   4839   Register input = ToRegister(instr->value());
   4840 
   4841   __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
   4842 
   4843   if (instr->hydrogen()->is_interval_check()) {
   4844     InstanceType first;
   4845     InstanceType last;
   4846     instr->hydrogen()->GetCheckInterval(&first, &last);
   4847 
   4848     __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
   4849             Immediate(static_cast<int8_t>(first)));
   4850 
    4851     // If there is only one type in the interval, check for equality.
   4852     if (first == last) {
   4853       DeoptimizeIf(not_equal, instr->environment());
   4854     } else {
   4855       DeoptimizeIf(below, instr->environment());
   4856       // Omit check for the last type.
   4857       if (last != LAST_TYPE) {
   4858         __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
   4859                 Immediate(static_cast<int8_t>(last)));
   4860         DeoptimizeIf(above, instr->environment());
   4861       }
   4862     }
   4863   } else {
   4864     uint8_t mask;
   4865     uint8_t tag;
   4866     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   4867 
   4868     if (IsPowerOf2(mask)) {
   4869       ASSERT(tag == 0 || IsPowerOf2(tag));
   4870       __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
   4871                Immediate(mask));
   4872       DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
   4873     } else {
   4874       __ movzxbl(kScratchRegister,
   4875                  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
   4876       __ andb(kScratchRegister, Immediate(mask));
   4877       __ cmpb(kScratchRegister, Immediate(tag));
   4878       DeoptimizeIf(not_equal, instr->environment());
   4879     }
   4880   }
   4881 }
   4882 
   4883 
   4884 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
   4885   Register reg = ToRegister(instr->value());
   4886   Handle<JSFunction> target = instr->hydrogen()->target();
   4887   __ CmpHeapObject(reg, target);
   4888   DeoptimizeIf(not_equal, instr->environment());
   4889 }
   4890 
   4891 
   4892 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   4893   {
   4894     PushSafepointRegistersScope scope(this);
   4895     __ push(object);
   4896     CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
   4897     __ testq(rax, Immediate(kSmiTagMask));
   4898   }
   4899   DeoptimizeIf(zero, instr->environment());
   4900 }
   4901 
   4902 
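         // Compares the object's map against the instruction's map set and
         // deoptimizes if none matches. If one of the maps has a migration
         // target, the final mismatch instead jumps to the deferred code
         // above, which calls Runtime::kMigrateInstance and deoptimizes only
         // if migration fails (signalled by a smi return value); otherwise
         // the whole check is retried from the check_maps label.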
   4903 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   4904   class DeferredCheckMaps: public LDeferredCode {
   4905    public:
   4906     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   4907         : LDeferredCode(codegen), instr_(instr), object_(object) {
   4908       SetExit(check_maps());
   4909     }
   4910     virtual void Generate() {
   4911       codegen()->DoDeferredInstanceMigration(instr_, object_);
   4912     }
   4913     Label* check_maps() { return &check_maps_; }
   4914     virtual LInstruction* instr() { return instr_; }
   4915    private:
   4916     LCheckMaps* instr_;
   4917     Label check_maps_;
   4918     Register object_;
   4919   };
   4920 
   4921   if (instr->hydrogen()->CanOmitMapChecks()) return;
   4922 
   4923   LOperand* input = instr->value();
   4924   ASSERT(input->IsRegister());
   4925   Register reg = ToRegister(input);
   4926 
   4927   SmallMapList* map_set = instr->hydrogen()->map_set();
   4928 
   4929   DeferredCheckMaps* deferred = NULL;
   4930   if (instr->hydrogen()->has_migration_target()) {
   4931     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   4932     __ bind(deferred->check_maps());
   4933   }
   4934 
   4935   Label success;
   4936   for (int i = 0; i < map_set->length() - 1; i++) {
   4937     Handle<Map> map = map_set->at(i);
   4938     __ CompareMap(reg, map, &success);
   4939     __ j(equal, &success);
   4940   }
   4941 
   4942   Handle<Map> map = map_set->last();
   4943   __ CompareMap(reg, map, &success);
   4944   if (instr->hydrogen()->has_migration_target()) {
   4945     __ j(not_equal, deferred->entry());
   4946   } else {
   4947     DeoptimizeIf(not_equal, instr->environment());
   4948   }
   4949 
   4950   __ bind(&success);
   4951 }
   4952 
   4953 
   4954 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   4955   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
   4956   Register result_reg = ToRegister(instr->result());
   4957   __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
   4958 }
   4959 
   4960 
   4961 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   4962   ASSERT(instr->unclamped()->Equals(instr->result()));
   4963   Register value_reg = ToRegister(instr->result());
   4964   __ ClampUint8(value_reg);
   4965 }
   4966 
   4967 
   4968 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   4969   ASSERT(instr->unclamped()->Equals(instr->result()));
   4970   Register input_reg = ToRegister(instr->unclamped());
   4971   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
   4972   Label is_smi, done, heap_number;
   4973 
   4974   __ JumpIfSmi(input_reg, &is_smi);
   4975 
    4976   // Check for heap number.
   4977   __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   4978          factory()->heap_number_map());
   4979   __ j(equal, &heap_number, Label::kNear);
   4980 
   4981   // Check for undefined. Undefined is converted to zero for clamping
   4982   // conversions.
   4983   __ Cmp(input_reg, factory()->undefined_value());
   4984   DeoptimizeIf(not_equal, instr->environment());
   4985   __ movq(input_reg, Immediate(0));
   4986   __ jmp(&done, Label::kNear);
   4987 
    4988   // Heap number.
   4989   __ bind(&heap_number);
   4990   __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   4991   __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
   4992   __ jmp(&done, Label::kNear);
   4993 
    4994   // Smi.
   4995   __ bind(&is_smi);
   4996   __ SmiToInteger32(input_reg, input_reg);
   4997   __ ClampUint8(input_reg);
   4998 
   4999   __ bind(&done);
   5000 }
   5001 
   5002 
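         // Inline allocation fast path. The flags request a tagged result
         // and optionally double alignment or pretenuring into old pointer
         // or old data space; the deferred path falls back to a runtime
         // allocation. When prefilling is required, the loop stores the
         // one-word filler map into each body word, counting temp down from
         // size / kPointerSize - 1, presumably so the partially initialized
         // object is always safe for the garbage collector to walk.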
   5003 void LCodeGen::DoAllocate(LAllocate* instr) {
   5004   class DeferredAllocate: public LDeferredCode {
   5005    public:
   5006     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5007         : LDeferredCode(codegen), instr_(instr) { }
   5008     virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
   5009     virtual LInstruction* instr() { return instr_; }
   5010    private:
   5011     LAllocate* instr_;
   5012   };
   5013 
   5014   DeferredAllocate* deferred =
   5015       new(zone()) DeferredAllocate(this, instr);
   5016 
   5017   Register result = ToRegister(instr->result());
   5018   Register temp = ToRegister(instr->temp());
   5019 
   5020   // Allocate memory for the object.
   5021   AllocationFlags flags = TAG_OBJECT;
   5022   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5023     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5024   }
   5025   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5026     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5027     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5028     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   5029   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5030     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5031     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   5032   }
   5033 
   5034   if (instr->size()->IsConstantOperand()) {
   5035     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5036     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   5037   } else {
   5038     Register size = ToRegister(instr->size());
   5039     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   5040   }
   5041 
   5042   __ bind(deferred->exit());
   5043 
   5044   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5045     if (instr->size()->IsConstantOperand()) {
   5046       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5047       __ movl(temp, Immediate((size / kPointerSize) - 1));
   5048     } else {
   5049       temp = ToRegister(instr->size());
   5050       __ sar(temp, Immediate(kPointerSizeLog2));
   5051       __ decl(temp);
   5052     }
   5053     Label loop;
   5054     __ bind(&loop);
   5055     __ Move(FieldOperand(result, temp, times_pointer_size, 0),
   5056         isolate()->factory()->one_pointer_filler_map());
   5057     __ decl(temp);
   5058     __ j(not_zero, &loop);
   5059   }
   5060 }
   5061 
   5062 
   5063 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5064   Register result = ToRegister(instr->result());
   5065 
   5066   // TODO(3095996): Get rid of this. For now, we need to make the
   5067   // result register contain a valid pointer because it is already
   5068   // contained in the register pointer map.
   5069   __ Move(result, Smi::FromInt(0));
   5070 
   5071   PushSafepointRegistersScope scope(this);
   5072   if (instr->size()->IsRegister()) {
   5073     Register size = ToRegister(instr->size());
   5074     ASSERT(!size.is(result));
   5075     __ Integer32ToSmi(size, size);
   5076     __ push(size);
   5077   } else {
   5078     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5079     __ Push(Smi::FromInt(size));
   5080   }
   5081 
   5082   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5083     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5084     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5085     CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
   5086   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5087     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5088     CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
   5089   } else {
   5090     CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
   5091   }
   5092   __ StoreToSafepointRegisterSlot(result, rax);
   5093 }
   5094 
   5095 
   5096 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   5097   ASSERT(ToRegister(instr->value()).is(rax));
   5098   __ push(rax);
   5099   CallRuntime(Runtime::kToFastProperties, 1, instr);
   5100 }
   5101 
   5102 
   5103 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   5104   Label materialized;
   5105   // Registers will be used as follows:
   5106   // rcx = literals array.
   5107   // rbx = regexp literal.
   5108   // rax = regexp literal clone.
   5109   int literal_offset =
   5110       FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
   5111   __ LoadHeapObject(rcx, instr->hydrogen()->literals());
   5112   __ movq(rbx, FieldOperand(rcx, literal_offset));
   5113   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
   5114   __ j(not_equal, &materialized, Label::kNear);
   5115 
    5116   // Create the regexp literal using the runtime function.
    5117   // The result will be in rax.
   5118   __ push(rcx);
   5119   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
   5120   __ Push(instr->hydrogen()->pattern());
   5121   __ Push(instr->hydrogen()->flags());
   5122   CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   5123   __ movq(rbx, rax);
   5124 
   5125   __ bind(&materialized);
   5126   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   5127   Label allocated, runtime_allocate;
   5128   __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
   5129   __ jmp(&allocated);
   5130 
   5131   __ bind(&runtime_allocate);
   5132   __ push(rbx);
   5133   __ Push(Smi::FromInt(size));
   5134   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   5135   __ pop(rbx);
   5136 
   5137   __ bind(&allocated);
   5138   // Copy the content into the newly allocated memory.
   5139   // (Unroll copy loop once for better throughput).
   5140   for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
   5141     __ movq(rdx, FieldOperand(rbx, i));
   5142     __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
   5143     __ movq(FieldOperand(rax, i), rdx);
   5144     __ movq(FieldOperand(rax, i + kPointerSize), rcx);
   5145   }
   5146   if ((size % (2 * kPointerSize)) != 0) {
   5147     __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
   5148     __ movq(FieldOperand(rax, size - kPointerSize), rdx);
   5149   }
   5150 }
   5151 
   5152 
   5153 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   5154   // Use the fast case closure allocation code that allocates in new
   5155   // space for nested functions that don't need literals cloning.
   5156   bool pretenure = instr->hydrogen()->pretenure();
   5157   if (!pretenure && instr->hydrogen()->has_no_literals()) {
   5158     FastNewClosureStub stub(instr->hydrogen()->language_mode(),
   5159                             instr->hydrogen()->is_generator());
   5160     __ Push(instr->hydrogen()->shared_info());
   5161     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   5162   } else {
   5163     __ push(rsi);
   5164     __ Push(instr->hydrogen()->shared_info());
   5165     __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
   5166                             Heap::kFalseValueRootIndex);
   5167     CallRuntime(Runtime::kNewClosure, 3, instr);
   5168   }
   5169 }
   5170 
   5171 
   5172 void LCodeGen::DoTypeof(LTypeof* instr) {
   5173   LOperand* input = instr->value();
   5174   EmitPushTaggedOperand(input);
   5175   CallRuntime(Runtime::kTypeof, 1, instr);
   5176 }
   5177 
   5178 
   5179 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   5180   ASSERT(!operand->IsDoubleRegister());
   5181   if (operand->IsConstantOperand()) {
   5182     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
   5183     AllowDeferredHandleDereference smi_check;
   5184     if (object->IsSmi()) {
   5185       __ Push(Handle<Smi>::cast(object));
   5186     } else {
   5187       __ PushHeapObject(Handle<HeapObject>::cast(object));
   5188     }
   5189   } else if (operand->IsRegister()) {
   5190     __ push(ToRegister(operand));
   5191   } else {
   5192     __ push(ToOperand(operand));
   5193   }
   5194 }
   5195 
   5196 
   5197 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   5198   Register input = ToRegister(instr->value());
   5199 
   5200   Condition final_branch_condition =
   5201       EmitTypeofIs(instr->TrueLabel(chunk_),
   5202           instr->FalseLabel(chunk_), input, instr->type_literal());
   5203   if (final_branch_condition != no_condition) {
   5204     EmitBranch(instr, final_branch_condition);
   5205   }
   5206 }
   5207 
   5208 
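         // Emits the comparison for "typeof input == type_name" and returns
         // the condition to branch on, or no_condition when the literal can
         // match no typeof result and the code just jumps to the false
         // label. Note that "object" must exclude callables and undetectable
         // objects, while "undefined" must include undetectable objects.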
   5209 Condition LCodeGen::EmitTypeofIs(Label* true_label,
   5210                                  Label* false_label,
   5211                                  Register input,
   5212                                  Handle<String> type_name) {
   5213   Condition final_branch_condition = no_condition;
   5214   if (type_name->Equals(heap()->number_string())) {
   5215     __ JumpIfSmi(input, true_label);
   5216     __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
   5217                    Heap::kHeapNumberMapRootIndex);
   5218 
   5219     final_branch_condition = equal;
   5220 
   5221   } else if (type_name->Equals(heap()->string_string())) {
   5222     __ JumpIfSmi(input, false_label);
   5223     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
   5224     __ j(above_equal, false_label);
   5225     __ testb(FieldOperand(input, Map::kBitFieldOffset),
   5226              Immediate(1 << Map::kIsUndetectable));
   5227     final_branch_condition = zero;
   5228 
   5229   } else if (type_name->Equals(heap()->symbol_string())) {
   5230     __ JumpIfSmi(input, false_label);
   5231     __ CmpObjectType(input, SYMBOL_TYPE, input);
   5232     final_branch_condition = equal;
   5233 
   5234   } else if (type_name->Equals(heap()->boolean_string())) {
   5235     __ CompareRoot(input, Heap::kTrueValueRootIndex);
   5236     __ j(equal, true_label);
   5237     __ CompareRoot(input, Heap::kFalseValueRootIndex);
   5238     final_branch_condition = equal;
   5239 
   5240   } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
   5241     __ CompareRoot(input, Heap::kNullValueRootIndex);
   5242     final_branch_condition = equal;
   5243 
   5244   } else if (type_name->Equals(heap()->undefined_string())) {
   5245     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
   5246     __ j(equal, true_label);
   5247     __ JumpIfSmi(input, false_label);
   5248     // Check for undetectable objects => true.
   5249     __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
   5250     __ testb(FieldOperand(input, Map::kBitFieldOffset),
   5251              Immediate(1 << Map::kIsUndetectable));
   5252     final_branch_condition = not_zero;
   5253 
   5254   } else if (type_name->Equals(heap()->function_string())) {
   5255     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   5256     __ JumpIfSmi(input, false_label);
   5257     __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
   5258     __ j(equal, true_label);
   5259     __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
   5260     final_branch_condition = equal;
   5261 
   5262   } else if (type_name->Equals(heap()->object_string())) {
   5263     __ JumpIfSmi(input, false_label);
   5264     if (!FLAG_harmony_typeof) {
   5265       __ CompareRoot(input, Heap::kNullValueRootIndex);
   5266       __ j(equal, true_label);
   5267     }
   5268     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
   5269     __ j(below, false_label);
   5270     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
   5271     __ j(above, false_label);
   5272     // Check for undetectable objects => false.
   5273     __ testb(FieldOperand(input, Map::kBitFieldOffset),
   5274              Immediate(1 << Map::kIsUndetectable));
   5275     final_branch_condition = zero;
   5276 
   5277   } else {
   5278     __ jmp(false_label);
   5279   }
   5280 
   5281   return final_branch_condition;
   5282 }
   5283 
   5284 
   5285 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   5286   Register temp = ToRegister(instr->temp());
   5287 
   5288   EmitIsConstructCall(temp);
   5289   EmitBranch(instr, equal);
   5290 }
   5291 
   5292 
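         // Sets the condition flags for an "is this a construct call" test:
         // load the caller's frame pointer, skip over an arguments adaptor
         // frame if one is present, and compare the frame marker slot
         // against StackFrame::CONSTRUCT. Callers branch on the resulting
         // equal/not_equal condition.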
   5293 void LCodeGen::EmitIsConstructCall(Register temp) {
   5294   // Get the frame pointer for the calling frame.
   5295   __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   5296 
   5297   // Skip the arguments adaptor frame if it exists.
   5298   Label check_frame_marker;
   5299   __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
   5300          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   5301   __ j(not_equal, &check_frame_marker, Label::kNear);
    5302   __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
   5303 
   5304   // Check the marker in the calling frame.
   5305   __ bind(&check_frame_marker);
   5306   __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
   5307          Smi::FromInt(StackFrame::CONSTRUCT));
   5308 }
   5309 
   5310 
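         // Lazy deoptimization works by patching already-emitted code, so
         // there must be at least Deoptimizer::patch_size() bytes between
         // one patch site and the next. If too little code has been emitted
         // since the last lazy-bailout point, the gap is padded with nops.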
   5311 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   5312   if (info()->IsStub()) return;
   5313   // Ensure that we have enough space after the previous lazy-bailout
   5314   // instruction for patching the code here.
   5315   int current_pc = masm()->pc_offset();
   5316   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
   5317     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
   5318     __ Nop(padding_size);
   5319   }
   5320 }
   5321 
   5322 
   5323 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   5324   EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   5325   last_lazy_deopt_pc_ = masm()->pc_offset();
   5326   ASSERT(instr->HasEnvironment());
   5327   LEnvironment* env = instr->environment();
   5328   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   5329   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   5330 }
   5331 
   5332 
   5333 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
   5334   Deoptimizer::BailoutType type = instr->hydrogen()->type();
    5335   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
    5336   // (they need the return address), even though the implementation of LAZY and
    5337   // EAGER is now identical. When LAZY is eventually completely folded into
    5338   // EAGER, remove the special case below.
   5339   if (info()->IsStub() && type == Deoptimizer::EAGER) {
   5340     type = Deoptimizer::LAZY;
   5341   }
   5342 
   5343   Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
   5344   DeoptimizeIf(no_condition, instr->environment(), type);
   5345 }
   5346 
   5347 
   5348 void LCodeGen::DoDummyUse(LDummyUse* instr) {
   5349   // Nothing to see here, move on!
   5350 }
   5351 
   5352 
   5353 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   5354   PushSafepointRegistersScope scope(this);
   5355   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   5356   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   5357   RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
   5358   ASSERT(instr->HasEnvironment());
   5359   LEnvironment* env = instr->environment();
   5360   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   5361 }
   5362 
   5363 
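         // Stack checks compare rsp against the stack limit root. At
         // function entry, overflow calls StackCheckStub inline; at a
         // backwards branch, the rare overflow path is moved into deferred
         // code that calls Runtime::kStackGuard. Both variants register the
         // environment for lazy deoptimization at the check.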
   5364 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   5365   class DeferredStackCheck: public LDeferredCode {
   5366    public:
   5367     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
   5368         : LDeferredCode(codegen), instr_(instr) { }
   5369     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   5370     virtual LInstruction* instr() { return instr_; }
   5371    private:
   5372     LStackCheck* instr_;
   5373   };
   5374 
   5375   ASSERT(instr->HasEnvironment());
   5376   LEnvironment* env = instr->environment();
   5377   // There is no LLazyBailout instruction for stack-checks. We have to
   5378   // prepare for lazy deoptimization explicitly here.
   5379   if (instr->hydrogen()->is_function_entry()) {
   5380     // Perform stack overflow check.
   5381     Label done;
   5382     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   5383     __ j(above_equal, &done, Label::kNear);
   5384     StackCheckStub stub;
   5385     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   5386     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   5387     last_lazy_deopt_pc_ = masm()->pc_offset();
   5388     __ bind(&done);
   5389     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   5390     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   5391   } else {
   5392     ASSERT(instr->hydrogen()->is_backwards_branch());
   5393     // Perform stack overflow check if this goto needs it before jumping.
   5394     DeferredStackCheck* deferred_stack_check =
   5395         new(zone()) DeferredStackCheck(this, instr);
   5396     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   5397     __ j(below, deferred_stack_check->entry());
   5398     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   5399     last_lazy_deopt_pc_ = masm()->pc_offset();
   5400     __ bind(instr->done_label());
   5401     deferred_stack_check->SetExit(instr->done_label());
   5402     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   5403     // Don't record a deoptimization index for the safepoint here.
   5404     // This will be done explicitly when emitting call and the safepoint in
   5405     // the deferred code.
   5406   }
   5407 }
   5408 
   5409 
   5410 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
   5411   // This is a pseudo-instruction that ensures that the environment here is
   5412   // properly registered for deoptimization and records the assembler's PC
   5413   // offset.
   5414   LEnvironment* environment = instr->environment();
   5415 
   5416   // If the environment were already registered, we would have no way of
   5417   // backpatching it with the spill slot operands.
   5418   ASSERT(!environment->HasBeenRegistered());
   5419   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   5420 
   5421   // Normally we record the first unknown OSR value as the entrypoint to the OSR
   5422   // code, but if there were none, record the entrypoint here.
   5423   if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
   5424 }
   5425 
   5426 
   5427 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   5428   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   5429   DeoptimizeIf(equal, instr->environment());
   5430 
   5431   Register null_value = rdi;
   5432   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   5433   __ cmpq(rax, null_value);
   5434   DeoptimizeIf(equal, instr->environment());
   5435 
   5436   Condition cc = masm()->CheckSmi(rax);
   5437   DeoptimizeIf(cc, instr->environment());
   5438 
   5439   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   5440   __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
   5441   DeoptimizeIf(below_equal, instr->environment());
   5442 
   5443   Label use_cache, call_runtime;
   5444   __ CheckEnumCache(null_value, &call_runtime);
   5445 
   5446   __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
   5447   __ jmp(&use_cache, Label::kNear);
   5448 
   5449   // Get the set of properties to enumerate.
   5450   __ bind(&call_runtime);
   5451   __ push(rax);
   5452   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
   5453 
   5454   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
   5455                  Heap::kMetaMapRootIndex);
   5456   DeoptimizeIf(not_equal, instr->environment());
   5457   __ bind(&use_cache);
   5458 }
   5459 
   5460 
   5461 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   5462   Register map = ToRegister(instr->map());
   5463   Register result = ToRegister(instr->result());
   5464   Label load_cache, done;
   5465   __ EnumLength(result, map);
   5466   __ Cmp(result, Smi::FromInt(0));
   5467   __ j(not_equal, &load_cache);
   5468   __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
   5469   __ jmp(&done);
   5470   __ bind(&load_cache);
   5471   __ LoadInstanceDescriptors(map, result);
   5472   __ movq(result,
   5473           FieldOperand(result, DescriptorArray::kEnumCacheOffset));
   5474   __ movq(result,
   5475           FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   5476   __ bind(&done);
   5477   Condition cc = masm()->CheckSmi(result);
   5478   DeoptimizeIf(cc, instr->environment());
   5479 }
   5480 
   5481 
   5482 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
   5483   Register object = ToRegister(instr->value());
   5484   __ cmpq(ToRegister(instr->map()),
   5485           FieldOperand(object, HeapObject::kMapOffset));
   5486   DeoptimizeIf(not_equal, instr->environment());
   5487 }
   5488 
   5489 
   5490 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   5491   Register object = ToRegister(instr->object());
   5492   Register index = ToRegister(instr->index());
   5493 
   5494   Label out_of_object, done;
   5495   __ SmiToInteger32(index, index);
   5496   __ cmpl(index, Immediate(0));
   5497   __ j(less, &out_of_object);
   5498   __ movq(object, FieldOperand(object,
   5499                                index,
   5500                                times_pointer_size,
   5501                                JSObject::kHeaderSize));
   5502   __ jmp(&done, Label::kNear);
   5503 
   5504   __ bind(&out_of_object);
   5505   __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
   5506   __ negl(index);
    5507   // Index is now equal to the out-of-object property index plus 1.
   5508   __ movq(object, FieldOperand(object,
   5509                                index,
   5510                                times_pointer_size,
   5511                                FixedArray::kHeaderSize - kPointerSize));
   5512   __ bind(&done);
   5513 }
   5514 
   5515 
   5516 #undef __
   5517 
   5518 } }  // namespace v8::internal
   5519 
   5520 #endif  // V8_TARGET_ARCH_X64
   5521