      1 // Copyright 2010 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include "v8.h"
     29 
     30 #include "bootstrapper.h"
     31 #include "codegen-inl.h"
     32 #include "compiler.h"
     33 #include "debug.h"
     34 #include "ic-inl.h"
     35 #include "parser.h"
     36 #include "regexp-macro-assembler.h"
     37 #include "register-allocator-inl.h"
     38 #include "scopes.h"
     39 
     40 namespace v8 {
     41 namespace internal {
     42 
     43 #define __ ACCESS_MASM(masm_)
     44 
     45 // -------------------------------------------------------------------------
     46 // Platform-specific DeferredCode functions.
     47 
     48 void DeferredCode::SaveRegisters() {
     49   for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
     50     int action = registers_[i];
     51     if (action == kPush) {
     52       __ push(RegisterAllocator::ToRegister(i));
     53     } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
     54       __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
     55     }
     56   }
     57 }
     58 
     59 
     60 void DeferredCode::RestoreRegisters() {
     61   // Restore registers in reverse order, since pushed registers must be popped off the stack in reverse.
     62   for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
     63     int action = registers_[i];
     64     if (action == kPush) {
     65       __ pop(RegisterAllocator::ToRegister(i));
     66     } else if (action != kIgnore) {
     67       action &= ~kSyncedFlag;
     68       __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
     69     }
     70   }
     71 }
     72 
     73 
     74 // -------------------------------------------------------------------------
     75 // CodeGenState implementation.
     76 
     77 CodeGenState::CodeGenState(CodeGenerator* owner)
     78     : owner_(owner),
     79       destination_(NULL),
     80       previous_(NULL) {
     81   owner_->set_state(this);
     82 }
     83 
     84 
     85 CodeGenState::CodeGenState(CodeGenerator* owner,
     86                            ControlDestination* destination)
     87     : owner_(owner),
     88       destination_(destination),
     89       previous_(owner->state()) {
     90   owner_->set_state(this);
     91 }
     92 
     93 
     94 CodeGenState::~CodeGenState() {
     95   ASSERT(owner_->state() == this);
     96   owner_->set_state(previous_);
     97 }
     98 
     99 
    100 // -------------------------------------------------------------------------
    101 // Deferred code objects
    102 //
    103 // These subclasses of DeferredCode add pieces of code to the end of the
    104 // generated code.  The main body of the generated code branches to them,
    105 // which keeps slower code paths out of the common case.
    106 // Many of them call a code stub or a runtime function.
    107 
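        // The result of dst + value is in dst.  It either overflowed or was
        // not smi tagged.  Call the appropriate specialized stub for add; the
        // result is left in dst.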
    108 class DeferredInlineSmiAdd: public DeferredCode {
    109  public:
    110   DeferredInlineSmiAdd(Register dst,
    111                        Smi* value,
    112                        OverwriteMode overwrite_mode)
    113       : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    114     set_comment("[ DeferredInlineSmiAdd");
    115   }
    116 
    117   virtual void Generate();
    118 
    119  private:
    120   Register dst_;
    121   Smi* value_;
    122   OverwriteMode overwrite_mode_;
    123 };
    124 
    125 
    126 // The result of value + src is in dst.  It either overflowed or was not
    127 // smi tagged.  Undo the speculative addition and call the appropriate
    128 // specialized stub for add.  The result is left in dst.
    129 class DeferredInlineSmiAddReversed: public DeferredCode {
    130  public:
    131   DeferredInlineSmiAddReversed(Register dst,
    132                                Smi* value,
    133                                OverwriteMode overwrite_mode)
    134       : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    135     set_comment("[ DeferredInlineSmiAddReversed");
    136   }
    137 
    138   virtual void Generate();
    139 
    140  private:
    141   Register dst_;
    142   Smi* value_;
    143   OverwriteMode overwrite_mode_;
    144 };
    145 
    146 
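        // The result of dst - value is in dst.  It either overflowed or was
        // not smi tagged.  Call the appropriate specialized stub for subtract;
        // the result is left in dst.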
    147 class DeferredInlineSmiSub: public DeferredCode {
    148  public:
    149   DeferredInlineSmiSub(Register dst,
    150                        Smi* value,
    151                        OverwriteMode overwrite_mode)
    152       : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    153     set_comment("[ DeferredInlineSmiSub");
    154   }
    155 
    156   virtual void Generate();
    157 
    158  private:
    159   Register dst_;
    160   Smi* value_;
    161   OverwriteMode overwrite_mode_;
    162 };
    163 
    164 
    165 // Call the appropriate binary operation stub to compute src op value
    166 // and leave the result in dst.
    167 class DeferredInlineSmiOperation: public DeferredCode {
    168  public:
    169   DeferredInlineSmiOperation(Token::Value op,
    170                              Register dst,
    171                              Register src,
    172                              Smi* value,
    173                              OverwriteMode overwrite_mode)
    174       : op_(op),
    175         dst_(dst),
    176         src_(src),
    177         value_(value),
    178         overwrite_mode_(overwrite_mode) {
    179     set_comment("[ DeferredInlineSmiOperation");
    180   }
    181 
    182   virtual void Generate();
    183 
    184  private:
    185   Token::Value op_;
    186   Register dst_;
    187   Register src_;
    188   Smi* value_;
    189   OverwriteMode overwrite_mode_;
    190 };
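
        // Typical (illustrative) shape of the inlined smi fast path that uses
        // these deferred objects -- a sketch of the assumed pattern, not code
        // emitted at this point in the file:
        //
        //   DeferredCode* deferred =
        //       new DeferredInlineSmiAdd(operand.reg(), smi_value, mode);
        //   ... speculative smi operation on operand.reg() ...
        //   deferred->Branch(overflow);   // taken on overflow / non-smi input
        //   deferred->BindExit();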
    191 
    192 
    193 class FloatingPointHelper : public AllStatic {
    194  public:
    195   // Code pattern for loading a floating point value. Input value must
    196   // be either a smi or a heap number object (fp value). Requirements:
    197   // operand on TOS+1. Returns operand as floating point number on FPU
    198   // stack.
    199   static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
    200 
    201   // Code pattern for loading a floating point value. Input value must
    202   // be either a smi or a heap number object (fp value). Requirements:
    203   // operand in src register. Returns operand as floating point number
    204   // in XMM register.
    205   static void LoadFloatOperand(MacroAssembler* masm,
    206                                Register src,
    207                                XMMRegister dst);
    208 
    209   // Code pattern for loading floating point values. Input values must
    210   // be either smi or heap number objects (fp values). Requirements:
    211   // operand_1 in rdx, operand_2 in rax.  Returns operands as
    212   // floating point numbers in XMM registers.
    213   static void LoadFloatOperands(MacroAssembler* masm,
    214                                 XMMRegister dst1,
    215                                 XMMRegister dst2);
    216 
    217   // Similar to LoadFloatOperands, assumes that the operands are smis.
    218   static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
    219                                         XMMRegister dst1,
    220                                         XMMRegister dst2);
    221 
    222   // Code pattern for loading floating point values onto the fp stack.
    223   // Input values must be either smi or heap number objects (fp values).
    224   // Requirements:
    225   // Register version: operands in registers lhs and rhs.
    226   // Stack version: operands on TOS+1 and TOS+2.
    227   // Returns operands as floating point numbers on fp stack.
    228   static void LoadFloatOperands(MacroAssembler* masm,
    229                                 Register lhs,
    230                                 Register rhs);
    231 
    232   // Test if operands are smi or number objects (fp). Requirements:
    233   // operand_1 in rax, operand_2 in rdx; falls through on float or smi
    234   // operands, jumps to the non_float label otherwise.
    235   static void CheckNumberOperands(MacroAssembler* masm,
    236                                   Label* non_float);
    237 
    238   // Takes the operands in rdx and rax and loads them as integers in rax
    239   // and rcx.
    240   static void LoadAsIntegers(MacroAssembler* masm,
    241                              bool use_sse3,
    242                              Label* operand_conversion_failure);
    243 };
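
        // Illustrative use (a sketch under the requirements stated above, not
        // code from this file): a caller needing both operands as doubles can
        // place them in rdx and rax and then write
        //
        //   FloatingPointHelper::LoadFloatOperands(masm, xmm0, xmm1);
        //   masm->addsd(xmm0, xmm1);
        //
        // leaving the double result in xmm0.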
    244 
    245 
    246 // -----------------------------------------------------------------------------
    247 // CodeGenerator implementation.
    248 
    249 CodeGenerator::CodeGenerator(MacroAssembler* masm)
    250     : deferred_(8),
    251       masm_(masm),
    252       info_(NULL),
    253       frame_(NULL),
    254       allocator_(NULL),
    255       state_(NULL),
    256       loop_nesting_(0),
    257       function_return_is_shadowed_(false),
    258       in_spilled_code_(false) {
    259 }
    260 
    261 
    262 Scope* CodeGenerator::scope() { return info_->function()->scope(); }
    263 
    264 
    265 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
    266   // Call the runtime to declare the globals.  The inevitable call
    267   // will sync frame elements to memory anyway, so we do it eagerly to
    268   // allow us to push the arguments directly into place.
    269   frame_->SyncRange(0, frame_->element_count() - 1);
    270 
    271   __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
    272   frame_->EmitPush(rsi);  // The context is the first argument.
    273   frame_->EmitPush(kScratchRegister);
    274   frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
    275   Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
    276   // Return value is ignored.
    277 }
    278 
    279 
    280 void CodeGenerator::Generate(CompilationInfo* info) {
    281   // Record the position for debugging purposes.
    282   CodeForFunctionPosition(info->function());
    283 
    284   // Initialize state.
    285   info_ = info;
    286   ASSERT(allocator_ == NULL);
    287   RegisterAllocator register_allocator(this);
    288   allocator_ = &register_allocator;
    289   ASSERT(frame_ == NULL);
    290   frame_ = new VirtualFrame();
    291   set_in_spilled_code(false);
    292 
    293   // Adjust for function-level loop nesting.
    294   loop_nesting_ += info->loop_nesting();
    295 
    296   JumpTarget::set_compiling_deferred_code(false);
    297 
    298 #ifdef DEBUG
    299   if (strlen(FLAG_stop_at) > 0 &&
    300       info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    301     frame_->SpillAll();
    302     __ int3();
    303   }
    304 #endif
    305 
    306   // New scope to get automatic timing calculation.
    307   {  // NOLINT
    308     HistogramTimerScope codegen_timer(&Counters::code_generation);
    309     CodeGenState state(this);
    310 
    311     // Entry:
    312     // Stack: receiver, arguments, return address.
    313     // rbp: caller's frame pointer
    314     // rsp: stack pointer
    315     // rdi: called JS function
    316     // rsi: callee's context
    317     allocator_->Initialize();
    318 
    319     if (info->mode() == CompilationInfo::PRIMARY) {
    320       frame_->Enter();
    321 
    322       // Allocate space for locals and initialize them.
    323       frame_->AllocateStackSlots();
    324 
    325       // Allocate the local context if needed.
    326       int heap_slots = scope()->num_heap_slots();
    327       if (heap_slots > 0) {
    328         Comment cmnt(masm_, "[ allocate local context");
    329         // Allocate local context.
    330         // Get outer context and create a new context based on it.
    331         frame_->PushFunction();
    332         Result context;
    333         if (heap_slots <= FastNewContextStub::kMaximumSlots) {
    334           FastNewContextStub stub(heap_slots);
    335           context = frame_->CallStub(&stub, 1);
    336         } else {
    337           context = frame_->CallRuntime(Runtime::kNewContext, 1);
    338         }
    339 
    340         // Update context local.
    341         frame_->SaveContextRegister();
    342 
    343         // Verify that the runtime call result and rsi agree.
    344         if (FLAG_debug_code) {
    345           __ cmpq(context.reg(), rsi);
    346           __ Assert(equal, "Runtime::NewContext should end up in rsi");
    347         }
    348       }
    349 
    350       // TODO(1241774): Improve this code:
    351       // 1) only needed if we have a context
    352       // 2) no need to recompute context ptr every single time
    353       // 3) don't copy parameter operand code from SlotOperand!
    354       {
    355         Comment cmnt2(masm_, "[ copy context parameters into .context");
    356         // Note that iteration order is relevant here! If the same
    357         // parameter occurs more than once (e.g., function (x, y, x)) and
    358         // needs to be copied into the context, the value that must be
    359         // visible inside the function is the one passed for its last
    360         // occurrence. This is a rare case, so we don't check for it;
    361         // instead we rely on the copying order: such a parameter is
    362         // copied repeatedly into the same context location, and thus the
    363         // last value copied is the one seen inside the function.
    364         for (int i = 0; i < scope()->num_parameters(); i++) {
    365           Variable* par = scope()->parameter(i);
    366           Slot* slot = par->slot();
    367           if (slot != NULL && slot->type() == Slot::CONTEXT) {
    368             // The use of SlotOperand below is safe in unspilled code
    369             // because the slot is guaranteed to be a context slot.
    370             //
    371             // There are no parameters in the global scope.
    372             ASSERT(!scope()->is_global_scope());
    373             frame_->PushParameterAt(i);
    374             Result value = frame_->Pop();
    375             value.ToRegister();
    376 
    377             // SlotOperand loads context.reg() with the context object
    378             // stored to, used below in RecordWrite.
    379             Result context = allocator_->Allocate();
    380             ASSERT(context.is_valid());
    381             __ movq(SlotOperand(slot, context.reg()), value.reg());
    382             int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
    383             Result scratch = allocator_->Allocate();
    384             ASSERT(scratch.is_valid());
    385             frame_->Spill(context.reg());
    386             frame_->Spill(value.reg());
    387             __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
    388           }
    389         }
    390       }
    391 
    392       // Store the arguments object.  This must happen after context
    393       // initialization because the arguments object may be stored in
    394       // the context.
    395       if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
    396         StoreArgumentsObject(true);
    397       }
    398 
    399       // Initialize ThisFunction reference if present.
    400       if (scope()->is_function_scope() && scope()->function() != NULL) {
    401         frame_->Push(Factory::the_hole_value());
    402         StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
    403       }
    404     } else {
    405       // When used as the secondary compiler for splitting, rbp, rsi,
    406       // and rdi have been pushed on the stack.  Adjust the virtual
    407       // frame to match this state.
    408       frame_->Adjust(3);
    409       allocator_->Unuse(rdi);
    410 
    411       // Bind all the bailout labels to the beginning of the function.
    412       List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
    413       for (int i = 0; i < bailouts->length(); i++) {
    414         __ bind(bailouts->at(i)->label());
    415       }
    416     }
    417 
    418     // Initialize the function return target after the locals are set
    419     // up, because it needs the expected frame height from the frame.
    420     function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    421     function_return_is_shadowed_ = false;
    422 
    423     // Generate code to 'execute' declarations and initialize functions
    424     // (source elements). In case of an illegal redeclaration we need to
    425     // handle that instead of processing the declarations.
    426     if (scope()->HasIllegalRedeclaration()) {
    427       Comment cmnt(masm_, "[ illegal redeclarations");
    428       scope()->VisitIllegalRedeclaration(this);
    429     } else {
    430       Comment cmnt(masm_, "[ declarations");
    431       ProcessDeclarations(scope()->declarations());
    432       // Bail out if a stack-overflow exception occurred when processing
    433       // declarations.
    434       if (HasStackOverflow()) return;
    435     }
    436 
    437     if (FLAG_trace) {
    438       frame_->CallRuntime(Runtime::kTraceEnter, 0);
    439       // Ignore the return value.
    440     }
    441     CheckStack();
    442 
    443     // Compile the body of the function in a vanilla state. Don't
    444     // bother compiling all the code if the scope has an illegal
    445     // redeclaration.
    446     if (!scope()->HasIllegalRedeclaration()) {
    447       Comment cmnt(masm_, "[ function body");
    448 #ifdef DEBUG
    449       bool is_builtin = Bootstrapper::IsActive();
    450       bool should_trace =
    451           is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
    452       if (should_trace) {
    453         frame_->CallRuntime(Runtime::kDebugTrace, 0);
    454         // Ignore the return value.
    455       }
    456 #endif
    457       VisitStatements(info->function()->body());
    458 
    459       // Handle the return from the function.
    460       if (has_valid_frame()) {
    461         // If there is a valid frame, control flow can fall off the end of
    462         // the body.  In that case there is an implicit return statement.
    463         ASSERT(!function_return_is_shadowed_);
    464         CodeForReturnPosition(info->function());
    465         frame_->PrepareForReturn();
    466         Result undefined(Factory::undefined_value());
    467         if (function_return_.is_bound()) {
    468           function_return_.Jump(&undefined);
    469         } else {
    470           function_return_.Bind(&undefined);
    471           GenerateReturnSequence(&undefined);
    472         }
    473       } else if (function_return_.is_linked()) {
    474         // If the return target has dangling jumps to it, then we have not
    475         // yet generated the return sequence.  This can happen when (a)
    476         // control does not flow off the end of the body so we did not
    477         // compile an artificial return statement just above, and (b) there
    478         // are return statements in the body but (c) they are all shadowed.
    479         Result return_value;
    480         function_return_.Bind(&return_value);
    481         GenerateReturnSequence(&return_value);
    482       }
    483     }
    484   }
    485 
    486   // Adjust for function-level loop nesting.
    487   loop_nesting_ -= info->loop_nesting();
    488 
    489   // Code generation state must be reset.
    490   ASSERT(state_ == NULL);
    491   ASSERT(loop_nesting() == 0);
    492   ASSERT(!function_return_is_shadowed_);
    493   function_return_.Unuse();
    494   DeleteFrame();
    495 
    496   // Process any deferred code using the register allocator.
    497   if (!HasStackOverflow()) {
    498     HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
    499     JumpTarget::set_compiling_deferred_code(true);
    500     ProcessDeferred();
    501     JumpTarget::set_compiling_deferred_code(false);
    502   }
    503 
    504   // There is no need to delete the register allocator, it is a
    505   // stack-allocated local.
    506   allocator_ = NULL;
    507 }
    508 
    509 void CodeGenerator::GenerateReturnSequence(Result* return_value) {
    510   // The return value is a live (but not currently reference counted)
    511   // reference to rax.  This is safe because the current frame does not
    512   // contain a reference to rax (it is prepared for the return by spilling
    513   // all registers).
    514   if (FLAG_trace) {
    515     frame_->Push(return_value);
    516     *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
    517   }
    518   return_value->ToRegister(rax);
    519 
    520   // Add a label for checking the size of the code used for returning.
    521 #ifdef DEBUG
    522   Label check_exit_codesize;
    523   masm_->bind(&check_exit_codesize);
    524 #endif
    525 
    526   // Leave the frame and return popping the arguments and the
    527   // receiver.
    528   frame_->Exit();
    529   masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
    530 #ifdef ENABLE_DEBUGGER_SUPPORT
    531   // Add padding that will be overwritten by a debugger breakpoint.
    532   // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
    533   // with length 7 (3 + 1 + 3).
    534   const int kPadding = Assembler::kJSReturnSequenceLength - 7;
    535   for (int i = 0; i < kPadding; ++i) {
    536     masm_->int3();
    537   }
    538   // Check that the size of the code used for returning matches what is
    539   // expected by the debugger.
    540   ASSERT_EQ(Assembler::kJSReturnSequenceLength,
    541             masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
    542 #endif
    543   DeleteFrame();
    544 }
    545 
    546 
    547 #ifdef DEBUG
    548 bool CodeGenerator::HasValidEntryRegisters() {
    549   return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
    550       && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
    551       && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
    552       && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
    553       && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
    554       && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
    555       && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
    556       && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
    557       && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
    558       && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
    559       && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
    560 }
    561 #endif
    562 
    563 
    564 class DeferredReferenceGetKeyedValue: public DeferredCode {
    565  public:
    566   explicit DeferredReferenceGetKeyedValue(Register dst,
    567                                           Register receiver,
    568                                           Register key,
    569                                           bool is_global)
    570       : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
    571     set_comment("[ DeferredReferenceGetKeyedValue");
    572   }
    573 
    574   virtual void Generate();
    575 
    576   Label* patch_site() { return &patch_site_; }
    577 
    578  private:
    579   Label patch_site_;
    580   Register dst_;
    581   Register receiver_;
    582   Register key_;
    583   bool is_global_;
    584 };
    585 
    586 
    587 void DeferredReferenceGetKeyedValue::Generate() {
    588   __ push(receiver_);  // First IC argument.
    589   __ push(key_);       // Second IC argument.
    590 
    591   // Calculate the delta from the IC call instruction to the map check
    592   // movq instruction in the inlined version.  This delta is stored in
    593   // a test(rax, delta) instruction after the call so that we can find
    594   // it in the IC initialization code and patch the movq instruction.
    595   // This means that we cannot allow test instructions after calls to
    596   // KeyedLoadIC stubs in other places.
    597   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
    598   RelocInfo::Mode mode = is_global_
    599                          ? RelocInfo::CODE_TARGET_CONTEXT
    600                          : RelocInfo::CODE_TARGET;
    601   __ Call(ic, mode);
    602   // The delta from the start of the map-compare instruction to the
    603   // test instruction.  We use masm_-> directly here instead of the __
    604   // macro because the macro sometimes uses macro expansion to turn
    605   // into something that can't return a value.  This is encountered
    606   // when doing generated code coverage tests.
    607   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
    608   // Here we use masm_-> instead of the __ macro because this is the
    609   // instruction that gets patched and coverage code gets in the way.
    610   // TODO(X64): Consider whether it's worth switching the test to a
    611   // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
    612   // be generated normally.
    613   masm_->testl(rax, Immediate(-delta_to_patch_site));
    614   __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
    615 
    616   if (!dst_.is(rax)) __ movq(dst_, rax);
    617   __ pop(key_);
    618   __ pop(receiver_);
    619 }
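
        // Note on the patching scheme above: the testl instruction is not
        // needed for its result at run time.  It acts as a marker whose
        // immediate encodes the (negative) distance back to the map-check
        // movq of the inlined load site, which the IC initialization code
        // uses to find and patch that instruction.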
    620 
    621 
    622 class DeferredReferenceSetKeyedValue: public DeferredCode {
    623  public:
    624   DeferredReferenceSetKeyedValue(Register value,
    625                                  Register key,
    626                                  Register receiver)
    627       : value_(value), key_(key), receiver_(receiver) {
    628     set_comment("[ DeferredReferenceSetKeyedValue");
    629   }
    630 
    631   virtual void Generate();
    632 
    633   Label* patch_site() { return &patch_site_; }
    634 
    635  private:
    636   Register value_;
    637   Register key_;
    638   Register receiver_;
    639   Label patch_site_;
    640 };
    641 
    642 
    643 void DeferredReferenceSetKeyedValue::Generate() {
    644   __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
    645   // Push receiver and key arguments on the stack.
    646   __ push(receiver_);
    647   __ push(key_);
    648   // Move value argument to rax as expected by the IC stub.
    649   if (!value_.is(rax)) __ movq(rax, value_);
    650   // Call the IC stub.
    651   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
    652   __ Call(ic, RelocInfo::CODE_TARGET);
    653   // The delta from the start of the map-compare instructions (initial movq)
    654   // to the test instruction.  We use masm_-> directly here instead of the
    655   // __ macro because the macro sometimes uses macro expansion to turn
    656   // into something that can't return a value.  This is encountered
    657   // when doing generated code coverage tests.
    658   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
    659   // Here we use masm_-> instead of the __ macro because this is the
    660   // instruction that gets patched and coverage code gets in the way.
    661   masm_->testl(rax, Immediate(-delta_to_patch_site));
    662   // Restore value (returned from store IC), key and receiver
    663   // registers.
    664   if (!value_.is(rax)) __ movq(value_, rax);
    665   __ pop(key_);
    666   __ pop(receiver_);
    667 }
    668 
    669 
    670 void CodeGenerator::CallApplyLazy(Expression* applicand,
    671                                   Expression* receiver,
    672                                   VariableProxy* arguments,
    673                                   int position) {
    674   // An optimized implementation of expressions of the form
    675   // x.apply(y, arguments).
    676   // If the arguments object of the scope has not been allocated,
    677   // and x.apply is Function.prototype.apply, this optimization
    678   // just copies y and the arguments of the current function on the
    679   // stack, as receiver and arguments, and calls x.
    680   // In the implementation comments, we call x the applicand
    681   // and y the receiver.
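          //
          // For example (illustrative):
          //   function f() { return g.apply(obj, arguments); }
          // compiles to code that pushes obj and f's actual arguments and then
          // calls g directly, without materializing the arguments object.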
    682   ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
    683   ASSERT(arguments->IsArguments());
    684 
    685   // Load applicand.apply onto the stack. This will usually
    686   // give us a megamorphic load site. Not super, but it works.
    687   Load(applicand);
    688   Handle<String> name = Factory::LookupAsciiSymbol("apply");
    689   frame()->Push(name);
    690   Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
    691   __ nop();
    692   frame()->Push(&answer);
    693 
    694   // Load the receiver and the existing arguments object onto the
    695   // expression stack. Avoid allocating the arguments object here.
    696   Load(receiver);
    697   LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
    698 
    699   // Emit the source position information after having loaded the
    700   // receiver and the arguments.
    701   CodeForSourcePosition(position);
    702   // Contents of frame at this point:
    703   // Frame[0]: arguments object of the current function or the hole.
    704   // Frame[1]: receiver
    705   // Frame[2]: applicand.apply
    706   // Frame[3]: applicand.
    707 
    708   // Check if the arguments object has been lazily allocated
    709   // already. If so, just use that instead of copying the arguments
    710   // from the stack. This also deals with cases where a local variable
    711   // named 'arguments' has been introduced.
    712   frame_->Dup();
    713   Result probe = frame_->Pop();
    714   { VirtualFrame::SpilledScope spilled_scope;
    715     Label slow, done;
    716     bool try_lazy = true;
    717     if (probe.is_constant()) {
    718       try_lazy = probe.handle()->IsTheHole();
    719     } else {
    720       __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
    721       probe.Unuse();
    722       __ j(not_equal, &slow);
    723     }
    724 
    725     if (try_lazy) {
    726       Label build_args;
    727       // Get rid of the arguments object probe.
    728       frame_->Drop();  // Can be called on a spilled frame.
    729       // Stack now has 3 elements on it.
    730       // Contents of stack at this point:
    731       // rsp[0]: receiver
    732       // rsp[1]: applicand.apply
    733       // rsp[2]: applicand.
    734 
    735       // Check that the receiver really is a JavaScript object.
    736       __ movq(rax, Operand(rsp, 0));
    737       Condition is_smi = masm_->CheckSmi(rax);
    738       __ j(is_smi, &build_args);
    739       // We allow all JSObjects including JSFunctions.  As long as
    740       // JS_FUNCTION_TYPE is the last instance type and it is right
    741       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
    742       // bound.
    743       ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
    744       ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
    745       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
    746       __ j(below, &build_args);
    747 
    748       // Check that applicand.apply is Function.prototype.apply.
    749       __ movq(rax, Operand(rsp, kPointerSize));
    750       is_smi = masm_->CheckSmi(rax);
    751       __ j(is_smi, &build_args);
    752       __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
    753       __ j(not_equal, &build_args);
    754       __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
    755       Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
    756       __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
    757       __ j(not_equal, &build_args);
    758 
    759       // Check that applicand is a function.
    760       __ movq(rdi, Operand(rsp, 2 * kPointerSize));
    761       is_smi = masm_->CheckSmi(rdi);
    762       __ j(is_smi, &build_args);
    763       __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
    764       __ j(not_equal, &build_args);
    765 
    766       // Copy the arguments to this function possibly from the
    767       // adaptor frame below it.
    768       Label invoke, adapted;
    769       __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    770       __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
    771                     Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    772       __ j(equal, &adapted);
    773 
    774       // No arguments adaptor frame. Copy fixed number of arguments.
    775       __ movq(rax, Immediate(scope()->num_parameters()));
    776       for (int i = 0; i < scope()->num_parameters(); i++) {
    777         __ push(frame_->ParameterAt(i));
    778       }
    779       __ jmp(&invoke);
    780 
    781       // Arguments adaptor frame present. Copy arguments from there, but
    782       // avoid copying too many arguments to avoid stack overflows.
    783       __ bind(&adapted);
    784       static const uint32_t kArgumentsLimit = 1 * KB;
    785       __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
    786       __ SmiToInteger32(rax, rax);
    787       __ movq(rcx, rax);
    788       __ cmpq(rax, Immediate(kArgumentsLimit));
    789       __ j(above, &build_args);
    790 
    791       // Loop through the arguments pushing them onto the execution
    792       // stack. We don't inform the virtual frame of the push, so we don't
    793       // have to worry about getting rid of the elements from the virtual
    794       // frame.
    795       Label loop;
    796       // rcx is a small non-negative integer, due to the test above.
    797       __ testl(rcx, rcx);
    798       __ j(zero, &invoke);
    799       __ bind(&loop);
    800       __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
    801       __ decl(rcx);
    802       __ j(not_zero, &loop);
    803 
    804       // Invoke the function.
    805       __ bind(&invoke);
    806       ParameterCount actual(rax);
    807       __ InvokeFunction(rdi, actual, CALL_FUNCTION);
    808       // Drop applicand.apply and applicand from the stack, and push
    809       // the result of the function call, but leave the spilled frame
    810       // unchanged, with 3 elements, so it is correct when we compile the
    811       // slow-case code.
    812       __ addq(rsp, Immediate(2 * kPointerSize));
    813       __ push(rax);
    814       // Stack now has 1 element:
    815       //   rsp[0]: result
    816       __ jmp(&done);
    817 
    818       // Slow-case: Allocate the arguments object since we know it isn't
    819       // there, and fall-through to the slow-case where we call
    820       // applicand.apply.
    821       __ bind(&build_args);
    822       // Stack now has 3 elements, because control jumped here from a point where:
    823       // rsp[0]: receiver
    824       // rsp[1]: applicand.apply
    825       // rsp[2]: applicand.
    826 
    827       // StoreArgumentsObject requires a correct frame, and may modify it.
    828       Result arguments_object = StoreArgumentsObject(false);
    829       frame_->SpillAll();
    830       arguments_object.ToRegister();
    831       frame_->EmitPush(arguments_object.reg());
    832       arguments_object.Unuse();
    833       // Stack and frame now have 4 elements.
    834       __ bind(&slow);
    835     }
    836 
    837     // Generic computation of x.apply(y, args) with no special optimization.
    838     // Flip applicand.apply and applicand on the stack, so
    839     // applicand looks like the receiver of the applicand.apply call.
    840     // Then process it as a normal function call.
    841     __ movq(rax, Operand(rsp, 3 * kPointerSize));
    842     __ movq(rbx, Operand(rsp, 2 * kPointerSize));
    843     __ movq(Operand(rsp, 2 * kPointerSize), rax);
    844     __ movq(Operand(rsp, 3 * kPointerSize), rbx);
    845 
    846     CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
    847     Result res = frame_->CallStub(&call_function, 3);
    848     // The function and its two arguments have been dropped.
    849     frame_->Drop(1);  // Drop the receiver as well.
    850     res.ToRegister();
    851     frame_->EmitPush(res.reg());
    852     // Stack now has 1 element:
    853     //   rsp[0]: result
    854     if (try_lazy) __ bind(&done);
    855   }  // End of spilled scope.
    856   // Restore the context register after a call.
    857   frame_->RestoreContextRegister();
    858 }
    859 
    860 
    861 class DeferredStackCheck: public DeferredCode {
    862  public:
    863   DeferredStackCheck() {
    864     set_comment("[ DeferredStackCheck");
    865   }
    866 
    867   virtual void Generate();
    868 };
    869 
    870 
    871 void DeferredStackCheck::Generate() {
    872   StackCheckStub stub;
    873   __ CallStub(&stub);
    874 }
    875 
    876 
    877 void CodeGenerator::CheckStack() {
    878   DeferredStackCheck* deferred = new DeferredStackCheck;
    879   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    880   deferred->Branch(below);
    881   deferred->BindExit();
    882 }
    883 
    884 
    885 void CodeGenerator::VisitAndSpill(Statement* statement) {
    886   // TODO(X64): No architecture specific code. Move to shared location.
    887   ASSERT(in_spilled_code());
    888   set_in_spilled_code(false);
    889   Visit(statement);
    890   if (frame_ != NULL) {
    891     frame_->SpillAll();
    892   }
    893   set_in_spilled_code(true);
    894 }
    895 
    896 
    897 void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
    898   ASSERT(in_spilled_code());
    899   set_in_spilled_code(false);
    900   VisitStatements(statements);
    901   if (frame_ != NULL) {
    902     frame_->SpillAll();
    903   }
    904   set_in_spilled_code(true);
    905 }
    906 
    907 
    908 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
    909   ASSERT(!in_spilled_code());
    910   for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
    911     Visit(statements->at(i));
    912   }
    913 }
    914 
    915 
    916 void CodeGenerator::VisitBlock(Block* node) {
    917   ASSERT(!in_spilled_code());
    918   Comment cmnt(masm_, "[ Block");
    919   CodeForStatementPosition(node);
    920   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
    921   VisitStatements(node->statements());
    922   if (node->break_target()->is_linked()) {
    923     node->break_target()->Bind();
    924   }
    925   node->break_target()->Unuse();
    926 }
    927 
    928 
    929 void CodeGenerator::VisitDeclaration(Declaration* node) {
    930   Comment cmnt(masm_, "[ Declaration");
    931   Variable* var = node->proxy()->var();
    932   ASSERT(var != NULL);  // must have been resolved
    933   Slot* slot = var->slot();
    934 
    935   // If it was not possible to allocate the variable at compile time,
    936   // we need to "declare" it at runtime to make sure it actually
    937   // exists in the local context.
    938   if (slot != NULL && slot->type() == Slot::LOOKUP) {
    939     // Variables with a "LOOKUP" slot were introduced as non-locals
    940     // during variable resolution and must have mode DYNAMIC.
    941     ASSERT(var->is_dynamic());
    942     // For now, just do a runtime call.  Sync the virtual frame eagerly
    943     // so we can simply push the arguments into place.
    944     frame_->SyncRange(0, frame_->element_count() - 1);
    945     frame_->EmitPush(rsi);
    946     __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
    947     frame_->EmitPush(kScratchRegister);
    948     // Declaration nodes are always introduced in one of two modes.
    949     ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    950     PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    951     frame_->EmitPush(Smi::FromInt(attr));
    952     // Push initial value, if any.
    953     // Note: For variables we must not push an initial value (such as
    954     // 'undefined') because we may have a (legal) redeclaration and we
    955     // must not destroy the current value.
    956     if (node->mode() == Variable::CONST) {
    957       frame_->EmitPush(Heap::kTheHoleValueRootIndex);
    958     } else if (node->fun() != NULL) {
    959       Load(node->fun());
    960     } else {
    961       frame_->EmitPush(Smi::FromInt(0));  // no initial value!
    962     }
    963     Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    964     // Ignore the return value (declarations are statements).
    965     return;
    966   }
    967 
    968   ASSERT(!var->is_global());
    969 
    970   // If we have a function or a constant, we need to initialize the variable.
    971   Expression* val = NULL;
    972   if (node->mode() == Variable::CONST) {
    973     val = new Literal(Factory::the_hole_value());
    974   } else {
    975     val = node->fun();  // NULL if we don't have a function
    976   }
    977 
    978   if (val != NULL) {
    979     {
    980       // Set the initial value.
    981       Reference target(this, node->proxy());
    982       Load(val);
    983       target.SetValue(NOT_CONST_INIT);
    984       // The reference is removed from the stack (preserving TOS) when
    985       // it goes out of scope.
    986     }
    987     // Get rid of the assigned value (declarations are statements).
    988     frame_->Drop();
    989   }
    990 }
    991 
    992 
    993 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
    994   ASSERT(!in_spilled_code());
    995   Comment cmnt(masm_, "[ ExpressionStatement");
    996   CodeForStatementPosition(node);
    997   Expression* expression = node->expression();
    998   expression->MarkAsStatement();
    999   Load(expression);
   1000   // Remove the lingering expression result from the top of stack.
   1001   frame_->Drop();
   1002 }
   1003 
   1004 
   1005 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
   1006   ASSERT(!in_spilled_code());
   1007   Comment cmnt(masm_, "// EmptyStatement");
   1008   CodeForStatementPosition(node);
   1009   // nothing to do
   1010 }
   1011 
   1012 
   1013 void CodeGenerator::VisitIfStatement(IfStatement* node) {
   1014   ASSERT(!in_spilled_code());
   1015   Comment cmnt(masm_, "[ IfStatement");
   1016   // Generate different code depending on which parts of the if statement
   1017   // are present or not.
   1018   bool has_then_stm = node->HasThenStatement();
   1019   bool has_else_stm = node->HasElseStatement();
   1020 
   1021   CodeForStatementPosition(node);
   1022   JumpTarget exit;
   1023   if (has_then_stm && has_else_stm) {
   1024     JumpTarget then;
   1025     JumpTarget else_;
   1026     ControlDestination dest(&then, &else_, true);
   1027     LoadCondition(node->condition(), &dest, true);
   1028 
   1029     if (dest.false_was_fall_through()) {
   1030       // The else target was bound, so we compile the else part first.
   1031       Visit(node->else_statement());
   1032 
   1033       // We may have dangling jumps to the then part.
   1034       if (then.is_linked()) {
   1035         if (has_valid_frame()) exit.Jump();
   1036         then.Bind();
   1037         Visit(node->then_statement());
   1038       }
   1039     } else {
   1040       // The then target was bound, so we compile the then part first.
   1041       Visit(node->then_statement());
   1042 
   1043       if (else_.is_linked()) {
   1044         if (has_valid_frame()) exit.Jump();
   1045         else_.Bind();
   1046         Visit(node->else_statement());
   1047       }
   1048     }
   1049 
   1050   } else if (has_then_stm) {
   1051     ASSERT(!has_else_stm);
   1052     JumpTarget then;
   1053     ControlDestination dest(&then, &exit, true);
   1054     LoadCondition(node->condition(), &dest, true);
   1055 
   1056     if (dest.false_was_fall_through()) {
   1057       // The exit label was bound.  We may have dangling jumps to the
   1058       // then part.
   1059       if (then.is_linked()) {
   1060         exit.Unuse();
   1061         exit.Jump();
   1062         then.Bind();
   1063         Visit(node->then_statement());
   1064       }
   1065     } else {
   1066       // The then label was bound.
   1067       Visit(node->then_statement());
   1068     }
   1069 
   1070   } else if (has_else_stm) {
   1071     ASSERT(!has_then_stm);
   1072     JumpTarget else_;
   1073     ControlDestination dest(&exit, &else_, false);
   1074     LoadCondition(node->condition(), &dest, true);
   1075 
   1076     if (dest.true_was_fall_through()) {
   1077       // The exit label was bound.  We may have dangling jumps to the
   1078       // else part.
   1079       if (else_.is_linked()) {
   1080         exit.Unuse();
   1081         exit.Jump();
   1082         else_.Bind();
   1083         Visit(node->else_statement());
   1084       }
   1085     } else {
   1086       // The else label was bound.
   1087       Visit(node->else_statement());
   1088     }
   1089 
   1090   } else {
   1091     ASSERT(!has_then_stm && !has_else_stm);
   1092     // We only care about the condition's side effects (not its value
   1093     // or control flow effect).  LoadCondition is called without
   1094     // forcing control flow.
   1095     ControlDestination dest(&exit, &exit, true);
   1096     LoadCondition(node->condition(), &dest, false);
   1097     if (!dest.is_used()) {
   1098       // We got a value on the frame rather than (or in addition to)
   1099       // control flow.
   1100       frame_->Drop();
   1101     }
   1102   }
   1103 
   1104   if (exit.is_linked()) {
   1105     exit.Bind();
   1106   }
   1107 }
   1108 
   1109 
   1110 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
   1111   ASSERT(!in_spilled_code());
   1112   Comment cmnt(masm_, "[ ContinueStatement");
   1113   CodeForStatementPosition(node);
   1114   node->target()->continue_target()->Jump();
   1115 }
   1116 
   1117 
   1118 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
   1119   ASSERT(!in_spilled_code());
   1120   Comment cmnt(masm_, "[ BreakStatement");
   1121   CodeForStatementPosition(node);
   1122   node->target()->break_target()->Jump();
   1123 }
   1124 
   1125 
   1126 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
   1127   ASSERT(!in_spilled_code());
   1128   Comment cmnt(masm_, "[ ReturnStatement");
   1129 
   1130   CodeForStatementPosition(node);
   1131   Load(node->expression());
   1132   Result return_value = frame_->Pop();
   1133   if (function_return_is_shadowed_) {
   1134     function_return_.Jump(&return_value);
   1135   } else {
   1136     frame_->PrepareForReturn();
   1137     if (function_return_.is_bound()) {
   1138       // If the function return label is already bound we reuse the
   1139       // code by jumping to the return site.
   1140       function_return_.Jump(&return_value);
   1141     } else {
   1142       function_return_.Bind(&return_value);
   1143       GenerateReturnSequence(&return_value);
   1144     }
   1145   }
   1146 }
   1147 
   1148 
   1149 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
   1150   ASSERT(!in_spilled_code());
   1151   Comment cmnt(masm_, "[ WithEnterStatement");
   1152   CodeForStatementPosition(node);
   1153   Load(node->expression());
   1154   Result context;
   1155   if (node->is_catch_block()) {
   1156     context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
   1157   } else {
   1158     context = frame_->CallRuntime(Runtime::kPushContext, 1);
   1159   }
   1160 
   1161   // Update context local.
   1162   frame_->SaveContextRegister();
   1163 
   1164   // Verify that the runtime call result and rsi agree.
   1165   if (FLAG_debug_code) {
   1166     __ cmpq(context.reg(), rsi);
   1167     __ Assert(equal, "Runtime::PushContext should end up in rsi");
   1168   }
   1169 }
   1170 
   1171 
   1172 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
   1173   ASSERT(!in_spilled_code());
   1174   Comment cmnt(masm_, "[ WithExitStatement");
   1175   CodeForStatementPosition(node);
   1176   // Pop context.
   1177   __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
   1178   // Update context local.
   1179   frame_->SaveContextRegister();
   1180 }
   1181 
   1182 
   1183 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
   1184   // TODO(X64): This code is completely generic and should be moved somewhere
   1185   // where it can be shared between architectures.
   1186   ASSERT(!in_spilled_code());
   1187   Comment cmnt(masm_, "[ SwitchStatement");
   1188   CodeForStatementPosition(node);
   1189   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1190 
   1191   // Compile the switch value.
   1192   Load(node->tag());
   1193 
   1194   ZoneList<CaseClause*>* cases = node->cases();
   1195   int length = cases->length();
   1196   CaseClause* default_clause = NULL;
   1197 
   1198   JumpTarget next_test;
   1199   // Compile the case label expressions and comparisons.  Exit early
   1200   // if a comparison is unconditionally true.  The target next_test is
   1201   // bound before the loop in order to indicate control flow to the
   1202   // first comparison.
   1203   next_test.Bind();
   1204   for (int i = 0; i < length && !next_test.is_unused(); i++) {
   1205     CaseClause* clause = cases->at(i);
   1206     // The default is not a test, but remember it for later.
   1207     if (clause->is_default()) {
   1208       default_clause = clause;
   1209       continue;
   1210     }
   1211 
   1212     Comment cmnt(masm_, "[ Case comparison");
   1213     // We recycle the same target next_test for each test.  Bind it if
   1214     // the previous test has not done so and then unuse it for the
   1215     // loop.
   1216     if (next_test.is_linked()) {
   1217       next_test.Bind();
   1218     }
   1219     next_test.Unuse();
   1220 
   1221     // Duplicate the switch value.
   1222     frame_->Dup();
   1223 
   1224     // Compile the label expression.
   1225     Load(clause->label());
   1226 
   1227     // Compare and branch to the body if true or the next test if
   1228     // false.  Prefer the next test as a fall through.
   1229     ControlDestination dest(clause->body_target(), &next_test, false);
   1230     Comparison(node, equal, true, &dest);
   1231 
   1232     // If the comparison fell through to the true target, jump to the
   1233     // actual body.
   1234     if (dest.true_was_fall_through()) {
   1235       clause->body_target()->Unuse();
   1236       clause->body_target()->Jump();
   1237     }
   1238   }
   1239 
   1240   // If there was control flow to a next test from the last one
   1241   // compiled, compile a jump to the default or break target.
   1242   if (!next_test.is_unused()) {
   1243     if (next_test.is_linked()) {
   1244       next_test.Bind();
   1245     }
   1246     // Drop the switch value.
   1247     frame_->Drop();
   1248     if (default_clause != NULL) {
   1249       default_clause->body_target()->Jump();
   1250     } else {
   1251       node->break_target()->Jump();
   1252     }
   1253   }
   1254 
   1255   // The last instruction emitted was a jump, either to the default
   1256   // clause or the break target, or else to a case body from the loop
   1257   // that compiles the tests.
   1258   ASSERT(!has_valid_frame());
   1259   // Compile case bodies as needed.
   1260   for (int i = 0; i < length; i++) {
   1261     CaseClause* clause = cases->at(i);
   1262 
   1263     // There are two ways to reach the body: from the corresponding
   1264     // test or as the fall through of the previous body.
   1265     if (clause->body_target()->is_linked() || has_valid_frame()) {
   1266       if (clause->body_target()->is_linked()) {
   1267         if (has_valid_frame()) {
   1268           // If we have both a jump to the test and a fall through, put
   1269           // a jump on the fall through path to avoid the dropping of
   1270           // the switch value on the test path.  The exception is the
   1271           // default which has already had the switch value dropped.
   1272           if (clause->is_default()) {
   1273             clause->body_target()->Bind();
   1274           } else {
   1275             JumpTarget body;
   1276             body.Jump();
   1277             clause->body_target()->Bind();
   1278             frame_->Drop();
   1279             body.Bind();
   1280           }
   1281         } else {
   1282           // No fall through to worry about.
   1283           clause->body_target()->Bind();
   1284           if (!clause->is_default()) {
   1285             frame_->Drop();
   1286           }
   1287         }
   1288       } else {
   1289         // Otherwise, we have only fall through.
   1290         ASSERT(has_valid_frame());
   1291       }
   1292 
   1293       // We are now prepared to compile the body.
   1294       Comment cmnt(masm_, "[ Case body");
   1295       VisitStatements(clause->statements());
   1296     }
   1297     clause->body_target()->Unuse();
   1298   }
   1299 
   1300   // We may not have a valid frame here so bind the break target only
   1301   // if needed.
   1302   if (node->break_target()->is_linked()) {
   1303     node->break_target()->Bind();
   1304   }
   1305   node->break_target()->Unuse();
   1306 }
   1307 
   1308 
   1309 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   1310   ASSERT(!in_spilled_code());
   1311   Comment cmnt(masm_, "[ DoWhileStatement");
   1312   CodeForStatementPosition(node);
   1313   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1314   JumpTarget body(JumpTarget::BIDIRECTIONAL);
   1315   IncrementLoopNesting();
   1316 
   1317   ConditionAnalysis info = AnalyzeCondition(node->cond());
   1318   // Label the top of the loop for the backward jump if necessary.
   1319   switch (info) {
   1320     case ALWAYS_TRUE:
   1321       // Use the continue target.
   1322       node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
   1323       node->continue_target()->Bind();
   1324       break;
   1325     case ALWAYS_FALSE:
   1326       // No need to label it.
   1327       node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1328       break;
   1329     case DONT_KNOW:
   1330       // Continue is the test, so use the backward body target.
   1331       node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1332       body.Bind();
   1333       break;
   1334   }
   1335 
   1336   CheckStack();  // TODO(1222600): ignore if body contains calls.
   1337   Visit(node->body());
   1338 
   1339   // Compile the test.
   1340   switch (info) {
   1341     case ALWAYS_TRUE:
   1342       // If control flow can fall off the end of the body, jump back
   1343       // to the top and bind the break target at the exit.
   1344       if (has_valid_frame()) {
   1345         node->continue_target()->Jump();
   1346       }
   1347       if (node->break_target()->is_linked()) {
   1348         node->break_target()->Bind();
   1349       }
   1350       break;
   1351     case ALWAYS_FALSE:
   1352       // We may have had continues or breaks in the body.
   1353       if (node->continue_target()->is_linked()) {
   1354         node->continue_target()->Bind();
   1355       }
   1356       if (node->break_target()->is_linked()) {
   1357         node->break_target()->Bind();
   1358       }
   1359       break;
   1360     case DONT_KNOW:
   1361       // We have to compile the test expression if it can be reached by
   1362       // control flow falling out of the body or via continue.
   1363       if (node->continue_target()->is_linked()) {
   1364         node->continue_target()->Bind();
   1365       }
   1366       if (has_valid_frame()) {
   1367         Comment cmnt(masm_, "[ DoWhileCondition");
   1368         CodeForDoWhileConditionPosition(node);
   1369         ControlDestination dest(&body, node->break_target(), false);
   1370         LoadCondition(node->cond(), &dest, true);
   1371       }
   1372       if (node->break_target()->is_linked()) {
   1373         node->break_target()->Bind();
   1374       }
   1375       break;
   1376   }
   1377 
   1378   DecrementLoopNesting();
   1379   node->continue_target()->Unuse();
   1380   node->break_target()->Unuse();
   1381 }
   1382 
   1383 
   1384 void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
   1385   ASSERT(!in_spilled_code());
   1386   Comment cmnt(masm_, "[ WhileStatement");
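          // Source pattern compiled here (illustrative): while (cond) { body }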
   1387   CodeForStatementPosition(node);
   1388 
   1389   // If the condition is always false and has no side effects, we do not
   1390   // need to compile anything.
   1391   ConditionAnalysis info = AnalyzeCondition(node->cond());
   1392   if (info == ALWAYS_FALSE) return;
   1393 
   1394   // Do not duplicate conditions that may have function literal
   1395   // subexpressions.  This can cause us to compile the function literal
   1396   // twice.
   1397   bool test_at_bottom = !node->may_have_function_literal();
   1398   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1399   IncrementLoopNesting();
   1400   JumpTarget body;
   1401   if (test_at_bottom) {
   1402     body.set_direction(JumpTarget::BIDIRECTIONAL);
   1403   }
   1404 
   1405   // Based on the condition analysis, compile the test as necessary.
   1406   switch (info) {
   1407     case ALWAYS_TRUE:
   1408       // We will not compile the test expression.  Label the top of the
   1409       // loop with the continue target.
   1410       node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
   1411       node->continue_target()->Bind();
   1412       break;
   1413     case DONT_KNOW: {
   1414       if (test_at_bottom) {
   1415         // Continue is the test at the bottom, no need to label the test
   1416         // at the top.  The body is a backward target.
   1417         node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1418       } else {
   1419         // Label the test at the top as the continue target.  The body
   1420         // is a forward-only target.
   1421         node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
   1422         node->continue_target()->Bind();
   1423       }
   1424       // Compile the test with the body as the true target and preferred
   1425       // fall-through and with the break target as the false target.
   1426       ControlDestination dest(&body, node->break_target(), true);
   1427       LoadCondition(node->cond(), &dest, true);
   1428 
   1429       if (dest.false_was_fall_through()) {
   1430         // If we got the break target as fall-through, the test may have
   1431         // been unconditionally false (if there are no jumps to the
   1432         // body).
   1433         if (!body.is_linked()) {
   1434           DecrementLoopNesting();
   1435           return;
   1436         }
   1437 
   1438         // Otherwise, jump around the body on the fall through and then
   1439         // bind the body target.
   1440         node->break_target()->Unuse();
   1441         node->break_target()->Jump();
   1442         body.Bind();
   1443       }
   1444       break;
   1445     }
   1446     case ALWAYS_FALSE:
   1447       UNREACHABLE();
   1448       break;
   1449   }
   1450 
   1451   CheckStack();  // TODO(1222600): ignore if body contains calls.
   1452   Visit(node->body());
   1453 
   1454   // Based on the condition analysis, compile the backward jump as
   1455   // necessary.
   1456   switch (info) {
   1457     case ALWAYS_TRUE:
   1458       // The loop body has been labeled with the continue target.
   1459       if (has_valid_frame()) {
   1460         node->continue_target()->Jump();
   1461       }
   1462       break;
   1463     case DONT_KNOW:
   1464       if (test_at_bottom) {
   1465         // If we have chosen to recompile the test at the bottom,
   1466         // then it is the continue target.
   1467         if (node->continue_target()->is_linked()) {
   1468           node->continue_target()->Bind();
   1469         }
   1470         if (has_valid_frame()) {
   1471           // The break target is the fall-through (body is a backward
   1472           // jump from here and thus an invalid fall-through).
   1473           ControlDestination dest(&body, node->break_target(), false);
   1474           LoadCondition(node->cond(), &dest, true);
   1475         }
   1476       } else {
   1477         // If we have chosen not to recompile the test at the
   1478         // bottom, jump back to the one at the top.
   1479         if (has_valid_frame()) {
   1480           node->continue_target()->Jump();
   1481         }
   1482       }
   1483       break;
   1484     case ALWAYS_FALSE:
   1485       UNREACHABLE();
   1486       break;
   1487   }
   1488 
   1489   // The break target may be already bound (by the condition), or there
   1490   // may not be a valid frame.  Bind it only if needed.
   1491   if (node->break_target()->is_linked()) {
   1492     node->break_target()->Bind();
   1493   }
   1494   DecrementLoopNesting();
   1495 }
   1496 
   1497 
   1498 void CodeGenerator::VisitForStatement(ForStatement* node) {
   1499   ASSERT(!in_spilled_code());
   1500   Comment cmnt(masm_, "[ ForStatement");
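          // Source pattern compiled here (illustrative): for (init; cond; next) { body }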
   1501   CodeForStatementPosition(node);
   1502 
   1503   // Compile the init expression if present.
   1504   if (node->init() != NULL) {
   1505     Visit(node->init());
   1506   }
   1507 
   1508   // If the condition is always false and has no side effects, we do not
   1509   // need to compile anything else.
   1510   ConditionAnalysis info = AnalyzeCondition(node->cond());
   1511   if (info == ALWAYS_FALSE) return;
   1512 
   1513   // Do not duplicate conditions that may have function literal
   1514   // subexpressions.  This can cause us to compile the function literal
   1515   // twice.
   1516   bool test_at_bottom = !node->may_have_function_literal();
   1517   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1518   IncrementLoopNesting();
   1519 
   1520   // Target for backward edge if no test at the bottom, otherwise
   1521   // unused.
   1522   JumpTarget loop(JumpTarget::BIDIRECTIONAL);
   1523 
   1524   // Target for backward edge if there is a test at the bottom,
   1525   // otherwise used as target for test at the top.
   1526   JumpTarget body;
   1527   if (test_at_bottom) {
   1528     body.set_direction(JumpTarget::BIDIRECTIONAL);
   1529   }
   1530 
   1531   // Based on the condition analysis, compile the test as necessary.
   1532   switch (info) {
   1533     case ALWAYS_TRUE:
   1534       // We will not compile the test expression.  Label the top of the
   1535       // loop.
   1536       if (node->next() == NULL) {
   1537         // Use the continue target if there is no update expression.
   1538         node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
   1539         node->continue_target()->Bind();
   1540       } else {
   1541         // Otherwise use the backward loop target.
   1542         node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1543         loop.Bind();
   1544       }
   1545       break;
   1546     case DONT_KNOW: {
   1547       if (test_at_bottom) {
   1548         // Continue is either the update expression or the test at the
   1549         // bottom, no need to label the test at the top.
   1550         node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1551       } else if (node->next() == NULL) {
   1552         // We are not recompiling the test at the bottom and there is no
   1553         // update expression.
   1554         node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
   1555         node->continue_target()->Bind();
   1556       } else {
   1557         // We are not recompiling the test at the bottom and there is an
   1558         // update expression.
   1559         node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1560         loop.Bind();
   1561       }
   1562 
   1563       // Compile the test with the body as the true target and preferred
   1564       // fall-through and with the break target as the false target.
   1565       ControlDestination dest(&body, node->break_target(), true);
   1566       LoadCondition(node->cond(), &dest, true);
   1567 
   1568       if (dest.false_was_fall_through()) {
   1569         // If we got the break target as fall-through, the test may have
   1570         // been unconditionally false (if there are no jumps to the
   1571         // body).
   1572         if (!body.is_linked()) {
   1573           DecrementLoopNesting();
   1574           return;
   1575         }
   1576 
   1577         // Otherwise, jump around the body on the fall through and then
   1578         // bind the body target.
   1579         node->break_target()->Unuse();
   1580         node->break_target()->Jump();
   1581         body.Bind();
   1582       }
   1583       break;
   1584     }
   1585     case ALWAYS_FALSE:
   1586       UNREACHABLE();
   1587       break;
   1588   }
   1589 
   1590   CheckStack();  // TODO(1222600): ignore if body contains calls.
   1591   Visit(node->body());
   1592 
   1593   // If there is an update expression, compile it if necessary.
   1594   if (node->next() != NULL) {
   1595     if (node->continue_target()->is_linked()) {
   1596       node->continue_target()->Bind();
   1597     }
   1598 
   1599     // Control can reach the update by falling out of the body or by a
   1600     // continue.
   1601     if (has_valid_frame()) {
   1602       // Record the source position of the statement: although this code
   1603       // comes after the code for the body, it actually belongs to the loop
   1604       // statement and not to the body.
   1605       CodeForStatementPosition(node);
   1606       Visit(node->next());
   1607     }
   1608   }
   1609 
   1610   // Based on the condition analysis, compile the backward jump as
   1611   // necessary.
   1612   switch (info) {
   1613     case ALWAYS_TRUE:
   1614       if (has_valid_frame()) {
   1615         if (node->next() == NULL) {
   1616           node->continue_target()->Jump();
   1617         } else {
   1618           loop.Jump();
   1619         }
   1620       }
   1621       break;
   1622     case DONT_KNOW:
   1623       if (test_at_bottom) {
   1624         if (node->continue_target()->is_linked()) {
   1625           // We can have dangling jumps to the continue target if there
   1626           // was no update expression.
   1627           node->continue_target()->Bind();
   1628         }
   1629         // Control can reach the test at the bottom by falling out of
   1630         // the body, by a continue in the body, or from the update
   1631         // expression.
   1632         if (has_valid_frame()) {
   1633           // The break target is the fall-through (body is a backward
   1634           // jump from here).
   1635           ControlDestination dest(&body, node->break_target(), false);
   1636           LoadCondition(node->cond(), &dest, true);
   1637         }
   1638       } else {
   1639         // Otherwise, jump back to the test at the top.
   1640         if (has_valid_frame()) {
   1641           if (node->next() == NULL) {
   1642             node->continue_target()->Jump();
   1643           } else {
   1644             loop.Jump();
   1645           }
   1646         }
   1647       }
   1648       break;
   1649     case ALWAYS_FALSE:
   1650       UNREACHABLE();
   1651       break;
   1652   }
   1653 
   1654   // The break target may be already bound (by the condition), or there
   1655   // may not be a valid frame.  Bind it only if needed.
   1656   if (node->break_target()->is_linked()) {
   1657     node->break_target()->Bind();
   1658   }
   1659   DecrementLoopNesting();
   1660 }
   1661 
   1662 
   1663 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   1664   ASSERT(!in_spilled_code());
   1665   VirtualFrame::SpilledScope spilled_scope;
   1666   Comment cmnt(masm_, "[ ForInStatement");
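          // Source pattern compiled here (illustrative): for (var key in enumerable) { body }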
   1667   CodeForStatementPosition(node);
   1668 
   1669   JumpTarget primitive;
   1670   JumpTarget jsobject;
   1671   JumpTarget fixed_array;
   1672   JumpTarget entry(JumpTarget::BIDIRECTIONAL);
   1673   JumpTarget end_del_check;
   1674   JumpTarget exit;
   1675 
   1676   // Get the object to enumerate over (converted to JSObject).
   1677   LoadAndSpill(node->enumerable());
   1678 
   1679   // Both SpiderMonkey and kjs ignore null and undefined in contrast
   1680   // to the specification; section 12.6.4 mandates a call to ToObject.
   1681   frame_->EmitPop(rax);
   1682 
   1683   // rax: value to be iterated over
   1684   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   1685   exit.Branch(equal);
   1686   __ CompareRoot(rax, Heap::kNullValueRootIndex);
   1687   exit.Branch(equal);
   1688 
   1689   // Stack layout in body:
   1690   // [iteration counter (smi)] <- slot 0
   1691   // [length of array]         <- slot 1
   1692   // [FixedArray]              <- slot 2
   1693   // [Map or 0]                <- slot 3
   1694   // [Object]                  <- slot 4
   1695 
   1696   // Check if enumerable is already a JSObject
   1697   // rax: value to be iterated over
   1698   Condition is_smi = masm_->CheckSmi(rax);
   1699   primitive.Branch(is_smi);
   1700   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
   1701   jsobject.Branch(above_equal);
   1702 
   1703   primitive.Bind();
   1704   frame_->EmitPush(rax);
   1705   frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
   1706   // The function call returns the value in rax, which is where we want it below.
   1707 
   1708   jsobject.Bind();
   1709   // Get the set of properties (as a FixedArray or Map).
   1710   // rax: value to be iterated over
   1711   frame_->EmitPush(rax);  // Push the object being iterated over.
   1712 
   1713 
   1714   // Check cache validity in generated code. This is a fast case for
   1715   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   1716   // guarantee cache validity, call the runtime system to check cache
   1717   // validity or get the property names in a fixed array.
   1718   JumpTarget call_runtime;
   1719   JumpTarget loop(JumpTarget::BIDIRECTIONAL);
   1720   JumpTarget check_prototype;
   1721   JumpTarget use_cache;
   1722   __ movq(rcx, rax);
   1723   loop.Bind();
   1724   // Check that there are no elements.
   1725   __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
   1726   __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
   1727   call_runtime.Branch(not_equal);
   1728   // Check that instance descriptors are not empty so that we can
   1729   // check for an enum cache.  Leave the map in rbx for the subsequent
   1730   // prototype load.
   1731   __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
   1732   __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
   1733   __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
   1734   call_runtime.Branch(equal);
   1735   // Check that there in an enum cache in the non-empty instance
   1736   // Check that there is an enum cache in the non-empty instance
   1737   // field does not contain a smi.
   1738   __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
   1739   is_smi = masm_->CheckSmi(rdx);
   1740   call_runtime.Branch(is_smi);
   1741   // For all objects but the receiver, check that the cache is empty.
   1742   __ cmpq(rcx, rax);
   1743   check_prototype.Branch(equal);
   1744   __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   1745   __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
   1746   call_runtime.Branch(not_equal);
   1747   check_prototype.Bind();
   1748   // Load the prototype from the map and loop if non-null.
   1749   __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
   1750   __ CompareRoot(rcx, Heap::kNullValueRootIndex);
   1751   loop.Branch(not_equal);
   1752   // The enum cache is valid.  Load the map of the object being
   1753   // iterated over and use the cache for the iteration.
   1754   __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
   1755   use_cache.Jump();
   1756 
   1757   call_runtime.Bind();
   1758   // Call the runtime to get the property names for the object.
   1759   frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
   1760   frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
   1761 
   1762   // If we got a Map, we can do a fast modification check.
   1763   // Otherwise, we got a FixedArray, and we have to do a slow check.
   1764   // rax: map or fixed array (result from call to
   1765   // Runtime::kGetPropertyNamesFast)
   1766   __ movq(rdx, rax);
   1767   __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
   1768   __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
   1769   fixed_array.Branch(not_equal);
   1770 
   1771   use_cache.Bind();
   1772   // Get enum cache
   1773   // rax: map (either the result from a call to
   1774   // Runtime::kGetPropertyNamesFast or has been fetched directly from
   1775   // the object)
   1776   __ movq(rcx, rax);
   1777   __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
   1778   // Get the bridge array held in the enumeration index field.
   1779   __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
   1780   // Get the cache from the bridge array.
   1781   __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   1782 
   1783   frame_->EmitPush(rax);  // <- slot 3
   1784   frame_->EmitPush(rdx);  // <- slot 2
   1785   __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
   1786   __ Integer32ToSmi(rax, rax);
   1787   frame_->EmitPush(rax);  // <- slot 1
   1788   frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
   1789   entry.Jump();
   1790 
   1791   fixed_array.Bind();
   1792   // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
   1793   frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
   1794   frame_->EmitPush(rax);  // <- slot 2
   1795 
   1796   // Push the length of the array and the initial index onto the stack.
   1797   __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   1798   __ Integer32ToSmi(rax, rax);
   1799   frame_->EmitPush(rax);  // <- slot 1
   1800   frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
   1801 
   1802   // Condition.
   1803   entry.Bind();
   1804   // Grab the current frame's height for the break and continue
   1805   // targets only after all the state is pushed on the frame.
   1806   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1807   node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
   1808 
   1809   __ movq(rax, frame_->ElementAt(0));  // load the current count
   1810   __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
   1811   node->break_target()->Branch(below_equal);
   1812 
   1813   // Get the i'th entry of the array.
   1814   __ movq(rdx, frame_->ElementAt(2));
   1815   SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
   1816   __ movq(rbx,
   1817           FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
   1818 
   1819   // Get the expected map from the stack or a zero map in the
   1820   // permanent slow case.  rax: current iteration count,
   1821   // rbx: i'th entry of the enum cache.
   1822   __ movq(rdx, frame_->ElementAt(3));
   1823   // Check if the expected map still matches that of the enumerable.
   1824   // If not, we have to filter the key.
   1825   // rax: current iteration count
   1826   // rbx: i'th entry of the enum cache
   1827   // rdx: expected map value
   1828   __ movq(rcx, frame_->ElementAt(4));
   1829   __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
   1830   __ cmpq(rcx, rdx);
   1831   end_del_check.Branch(equal);
   1832 
   1833   // Convert the entry to a string (or null if it isn't a property anymore).
   1834   frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
   1835   frame_->EmitPush(rbx);  // push entry
   1836   frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
   1837   __ movq(rbx, rax);
   1838 
   1839   // If the property has been removed while iterating, we just skip it.
   1840   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   1841   node->continue_target()->Branch(equal);
   1842 
   1843   end_del_check.Bind();
   1844   // Store the entry in the 'each' expression and take another spin in the
   1845   // loop.  rbx: i'th entry of the enum cache (or string thereof).
   1846   frame_->EmitPush(rbx);
   1847   { Reference each(this, node->each());
   1848     // Loading a reference may leave the frame in an unspilled state.
   1849     frame_->SpillAll();
   1850     if (!each.is_illegal()) {
   1851       if (each.size() > 0) {
   1852         frame_->EmitPush(frame_->ElementAt(each.size()));
   1853         each.SetValue(NOT_CONST_INIT);
   1854         frame_->Drop(2);  // Drop the original and the copy of the element.
   1855       } else {
   1856         // If the reference has size zero then we can use the value below
   1857         // the reference as if it were above the reference, instead of pushing
   1858         // a new copy of it above the reference.
   1859         each.SetValue(NOT_CONST_INIT);
   1860         frame_->Drop();  // Drop the original of the element.
   1861       }
   1862     }
   1863   }
   1864   // Unloading a reference may leave the frame in an unspilled state.
   1865   frame_->SpillAll();
   1866 
   1867   // Body.
   1868   CheckStack();  // TODO(1222600): ignore if body contains calls.
   1869   VisitAndSpill(node->body());
   1870 
   1871   // Next.  Reestablish a spilled frame in case we are coming here via
   1872   // a continue in the body.
   1873   node->continue_target()->Bind();
   1874   frame_->SpillAll();
   1875   frame_->EmitPop(rax);
   1876   __ SmiAddConstant(rax, rax, Smi::FromInt(1));
   1877   frame_->EmitPush(rax);
   1878   entry.Jump();
   1879 
   1880   // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
   1881   // any frame.
   1882   node->break_target()->Bind();
   1883   frame_->Drop(5);
   1884 
   1885   // Exit.
   1886   exit.Bind();
   1887 
   1888   node->continue_target()->Unuse();
   1889   node->break_target()->Unuse();
   1890 }
   1891 
   1892 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
   1893   ASSERT(!in_spilled_code());
   1894   VirtualFrame::SpilledScope spilled_scope;
   1895   Comment cmnt(masm_, "[ TryCatchStatement");
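          // Source pattern compiled here (illustrative): try { ... } catch (e) { ... }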
   1896   CodeForStatementPosition(node);
   1897 
   1898   JumpTarget try_block;
   1899   JumpTarget exit;
   1900 
   1901   try_block.Call();
   1902   // --- Catch block ---
   1903   frame_->EmitPush(rax);
   1904 
   1905   // Store the caught exception in the catch variable.
   1906   Variable* catch_var = node->catch_var()->var();
   1907   ASSERT(catch_var != NULL && catch_var->slot() != NULL);
   1908   StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
   1909 
   1910   // Remove the exception from the stack.
   1911   frame_->Drop();
   1912 
   1913   VisitStatementsAndSpill(node->catch_block()->statements());
   1914   if (has_valid_frame()) {
   1915     exit.Jump();
   1916   }
   1917 
   1918 
   1919   // --- Try block ---
   1920   try_block.Bind();
   1921 
   1922   frame_->PushTryHandler(TRY_CATCH_HANDLER);
   1923   int handler_height = frame_->height();
   1924 
   1925   // Shadow the jump targets for all escapes from the try block, including
   1926   // returns.  During shadowing, the original target is hidden as the
   1927   // ShadowTarget and operations on the original actually affect the
   1928   // shadowing target.
   1929   //
   1930   // We should probably try to unify the escaping targets and the return
   1931   // target.
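          // For example, a 'return' inside the try block first jumps to its
          // shadow target here, so the try handler can be unlinked before the
          // actual return is performed.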
   1932   int nof_escapes = node->escaping_targets()->length();
   1933   List<ShadowTarget*> shadows(1 + nof_escapes);
   1934 
   1935   // Add the shadow target for the function return.
   1936   static const int kReturnShadowIndex = 0;
   1937   shadows.Add(new ShadowTarget(&function_return_));
   1938   bool function_return_was_shadowed = function_return_is_shadowed_;
   1939   function_return_is_shadowed_ = true;
   1940   ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
   1941 
   1942   // Add the remaining shadow targets.
   1943   for (int i = 0; i < nof_escapes; i++) {
   1944     shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
   1945   }
   1946 
   1947   // Generate code for the statements in the try block.
   1948   VisitStatementsAndSpill(node->try_block()->statements());
   1949 
   1950   // Stop the introduced shadowing and count the number of required unlinks.
   1951   // After shadowing stops, the original targets are unshadowed and the
   1952   // ShadowTargets represent the formerly shadowing targets.
   1953   bool has_unlinks = false;
   1954   for (int i = 0; i < shadows.length(); i++) {
   1955     shadows[i]->StopShadowing();
   1956     has_unlinks = has_unlinks || shadows[i]->is_linked();
   1957   }
   1958   function_return_is_shadowed_ = function_return_was_shadowed;
   1959 
   1960   // Get an external reference to the handler address.
   1961   ExternalReference handler_address(Top::k_handler_address);
   1962 
   1963   // Make sure that there's nothing left on the stack above the
   1964   // handler structure.
   1965   if (FLAG_debug_code) {
   1966     __ movq(kScratchRegister, handler_address);
   1967     __ cmpq(rsp, Operand(kScratchRegister, 0));
   1968     __ Assert(equal, "stack pointer should point to top handler");
   1969   }
   1970 
   1971   // If we can fall off the end of the try block, unlink from try chain.
   1972   if (has_valid_frame()) {
   1973     // The next handler address is on top of the frame.  Unlink from
   1974     // the handler list and drop the rest of this handler from the
   1975     // frame.
   1976     ASSERT(StackHandlerConstants::kNextOffset == 0);
   1977     __ movq(kScratchRegister, handler_address);
   1978     frame_->EmitPop(Operand(kScratchRegister, 0));
   1979     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
   1980     if (has_unlinks) {
   1981       exit.Jump();
   1982     }
   1983   }
   1984 
   1985   // Generate unlink code for the (formerly) shadowing targets that
   1986   // have been jumped to.  Deallocate each shadow target.
   1987   Result return_value;
   1988   for (int i = 0; i < shadows.length(); i++) {
   1989     if (shadows[i]->is_linked()) {
   1990       // Unlink from try chain; be careful not to destroy the TOS if
   1991       // there is one.
   1992       if (i == kReturnShadowIndex) {
   1993         shadows[i]->Bind(&return_value);
   1994         return_value.ToRegister(rax);
   1995       } else {
   1996         shadows[i]->Bind();
   1997       }
   1998       // Because we can be jumping here (to spilled code) from
   1999       // unspilled code, we need to reestablish a spilled frame at
   2000       // this block.
   2001       frame_->SpillAll();
   2002 
   2003       // Reload sp from the top handler, because some statements that we
   2004       // break from (e.g. for...in) may have left stuff on the stack.
   2005       __ movq(kScratchRegister, handler_address);
   2006       __ movq(rsp, Operand(kScratchRegister, 0));
   2007       frame_->Forget(frame_->height() - handler_height);
   2008 
   2009       ASSERT(StackHandlerConstants::kNextOffset == 0);
   2010       __ movq(kScratchRegister, handler_address);
   2011       frame_->EmitPop(Operand(kScratchRegister, 0));
   2012       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
   2013 
   2014       if (i == kReturnShadowIndex) {
   2015         if (!function_return_is_shadowed_) frame_->PrepareForReturn();
   2016         shadows[i]->other_target()->Jump(&return_value);
   2017       } else {
   2018         shadows[i]->other_target()->Jump();
   2019       }
   2020     }
   2021   }
   2022 
   2023   exit.Bind();
   2024 }
   2025 
   2026 
   2027 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
   2028   ASSERT(!in_spilled_code());
   2029   VirtualFrame::SpilledScope spilled_scope;
   2030   Comment cmnt(masm_, "[ TryFinallyStatement");
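          // Source pattern compiled here (illustrative): try { ... } finally { ... }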
   2031   CodeForStatementPosition(node);
   2032 
   2033   // State: Used to keep track of the reason for entering the finally
   2034   // block. Should probably be extended to hold information for
   2035   // break/continue from within the try block.
   2036   enum { FALLING, THROWING, JUMPING };
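          // FALLING marks normal fall-through into the finally block, THROWING
          // marks entry via a thrown exception, and JUMPING + i marks entry via
          // the i'th shadowed target (e.g. a return, break, or continue).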
   2037 
   2038   JumpTarget try_block;
   2039   JumpTarget finally_block;
   2040 
   2041   try_block.Call();
   2042 
   2043   frame_->EmitPush(rax);
   2044   // In case of thrown exceptions, this is where we continue.
   2045   __ Move(rcx, Smi::FromInt(THROWING));
   2046   finally_block.Jump();
   2047 
   2048   // --- Try block ---
   2049   try_block.Bind();
   2050 
   2051   frame_->PushTryHandler(TRY_FINALLY_HANDLER);
   2052   int handler_height = frame_->height();
   2053 
   2054   // Shadow the jump targets for all escapes from the try block, including
   2055   // returns.  During shadowing, the original target is hidden as the
   2056   // ShadowTarget and operations on the original actually affect the
   2057   // shadowing target.
   2058   //
   2059   // We should probably try to unify the escaping targets and the return
   2060   // target.
   2061   int nof_escapes = node->escaping_targets()->length();
   2062   List<ShadowTarget*> shadows(1 + nof_escapes);
   2063 
   2064   // Add the shadow target for the function return.
   2065   static const int kReturnShadowIndex = 0;
   2066   shadows.Add(new ShadowTarget(&function_return_));
   2067   bool function_return_was_shadowed = function_return_is_shadowed_;
   2068   function_return_is_shadowed_ = true;
   2069   ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
   2070 
   2071   // Add the remaining shadow targets.
   2072   for (int i = 0; i < nof_escapes; i++) {
   2073     shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
   2074   }
   2075 
   2076   // Generate code for the statements in the try block.
   2077   VisitStatementsAndSpill(node->try_block()->statements());
   2078 
   2079   // Stop the introduced shadowing and count the number of required unlinks.
   2080   // After shadowing stops, the original targets are unshadowed and the
   2081   // ShadowTargets represent the formerly shadowing targets.
   2082   int nof_unlinks = 0;
   2083   for (int i = 0; i < shadows.length(); i++) {
   2084     shadows[i]->StopShadowing();
   2085     if (shadows[i]->is_linked()) nof_unlinks++;
   2086   }
   2087   function_return_is_shadowed_ = function_return_was_shadowed;
   2088 
   2089   // Get an external reference to the handler address.
   2090   ExternalReference handler_address(Top::k_handler_address);
   2091 
   2092   // If we can fall off the end of the try block, unlink from the try
   2093   // chain and set the state on the frame to FALLING.
   2094   if (has_valid_frame()) {
   2095     // The next handler address is on top of the frame.
   2096     ASSERT(StackHandlerConstants::kNextOffset == 0);
   2097     __ movq(kScratchRegister, handler_address);
   2098     frame_->EmitPop(Operand(kScratchRegister, 0));
   2099     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
   2100 
   2101     // Fake a top of stack value (unneeded when FALLING) and set the
   2102     // state in rcx, then jump around the unlink blocks, if any.
   2103     frame_->EmitPush(Heap::kUndefinedValueRootIndex);
   2104     __ Move(rcx, Smi::FromInt(FALLING));
   2105     if (nof_unlinks > 0) {
   2106       finally_block.Jump();
   2107     }
   2108   }
   2109 
   2110   // Generate code to unlink and set the state for the (formerly)
   2111   // shadowing targets that have been jumped to.
   2112   for (int i = 0; i < shadows.length(); i++) {
   2113     if (shadows[i]->is_linked()) {
   2114       // If we have come from the shadowed return, the return value is
   2115       // on the virtual frame.  We must preserve it until it is
   2116       // pushed.
   2117       if (i == kReturnShadowIndex) {
   2118         Result return_value;
   2119         shadows[i]->Bind(&return_value);
   2120         return_value.ToRegister(rax);
   2121       } else {
   2122         shadows[i]->Bind();
   2123       }
   2124       // Because we can be jumping here (to spilled code) from
   2125       // unspilled code, we need to reestablish a spilled frame at
   2126       // this block.
   2127       frame_->SpillAll();
   2128 
   2129       // Reload sp from the top handler, because some statements that
   2130       // we break from (e.g. for...in) may have left stuff on the
   2131       // stack.
   2132       __ movq(kScratchRegister, handler_address);
   2133       __ movq(rsp, Operand(kScratchRegister, 0));
   2134       frame_->Forget(frame_->height() - handler_height);
   2135 
   2136       // Unlink this handler and drop it from the frame.
   2137       ASSERT(StackHandlerConstants::kNextOffset == 0);
   2138       __ movq(kScratchRegister, handler_address);
   2139       frame_->EmitPop(Operand(kScratchRegister, 0));
   2140       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
   2141 
   2142       if (i == kReturnShadowIndex) {
   2143         // If this target shadowed the function return, materialize
   2144         // the return value on the stack.
   2145         frame_->EmitPush(rax);
   2146       } else {
   2147         // Fake TOS for targets that shadowed breaks and continues.
   2148         frame_->EmitPush(Heap::kUndefinedValueRootIndex);
   2149       }
   2150       __ Move(rcx, Smi::FromInt(JUMPING + i));
   2151       if (--nof_unlinks > 0) {
   2152         // If this is not the last unlink block, jump around the next.
   2153         finally_block.Jump();
   2154       }
   2155     }
   2156   }
   2157 
   2158   // --- Finally block ---
   2159   finally_block.Bind();
   2160 
   2161   // Push the state on the stack.
   2162   frame_->EmitPush(rcx);
   2163 
   2164   // We keep two elements on the stack - the (possibly faked) result
   2165   // and the state - while evaluating the finally block.
   2166   //
   2167   // Generate code for the statements in the finally block.
   2168   VisitStatementsAndSpill(node->finally_block()->statements());
   2169 
   2170   if (has_valid_frame()) {
   2171     // Restore state and return value or faked TOS.
   2172     frame_->EmitPop(rcx);
   2173     frame_->EmitPop(rax);
   2174   }
   2175 
   2176   // Generate code to jump to the right destination for all used
   2177   // formerly shadowing targets.  Deallocate each shadow target.
   2178   for (int i = 0; i < shadows.length(); i++) {
   2179     if (has_valid_frame() && shadows[i]->is_bound()) {
   2180       BreakTarget* original = shadows[i]->other_target();
   2181       __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
   2182       if (i == kReturnShadowIndex) {
   2183         // The return value is (already) in rax.
   2184         Result return_value = allocator_->Allocate(rax);
   2185         ASSERT(return_value.is_valid());
   2186         if (function_return_is_shadowed_) {
   2187           original->Branch(equal, &return_value);
   2188         } else {
   2189           // Branch around the preparation for return which may emit
   2190           // code.
   2191           JumpTarget skip;
   2192           skip.Branch(not_equal);
   2193           frame_->PrepareForReturn();
   2194           original->Jump(&return_value);
   2195           skip.Bind();
   2196         }
   2197       } else {
   2198         original->Branch(equal);
   2199       }
   2200     }
   2201   }
   2202 
   2203   if (has_valid_frame()) {
   2204     // Check if we need to rethrow the exception.
   2205     JumpTarget exit;
   2206     __ SmiCompare(rcx, Smi::FromInt(THROWING));
   2207     exit.Branch(not_equal);
   2208 
   2209     // Rethrow exception.
   2210     frame_->EmitPush(rax);  // undo pop from above
   2211     frame_->CallRuntime(Runtime::kReThrow, 1);
   2212 
   2213     // Done.
   2214     exit.Bind();
   2215   }
   2216 }
   2217 
   2218 
   2219 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
   2220   ASSERT(!in_spilled_code());
   2221   Comment cmnt(masm_, "[ DebuggerStatement");
   2222   CodeForStatementPosition(node);
   2223 #ifdef ENABLE_DEBUGGER_SUPPORT
   2224   // Spill everything, even constants, to the frame.
   2225   frame_->SpillAll();
   2226 
   2227   frame_->DebugBreak();
   2228   // Ignore the return value.
   2229 #endif
   2230 }
   2231 
   2232 
   2233 void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
   2234   ASSERT(boilerplate->IsBoilerplate());
   2235 
   2236   // The inevitable call will sync frame elements to memory anyway, so
   2237   // we do it eagerly to allow us to push the arguments directly into
   2238   // place.
   2239   frame_->SyncRange(0, frame_->element_count() - 1);
   2240 
   2241   // Use the fast case closure allocation code that allocates in new
   2242   // space for nested functions that don't need literals cloning.
   2243   if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
   2244     FastNewClosureStub stub;
   2245     frame_->Push(boilerplate);
   2246     Result answer = frame_->CallStub(&stub, 1);
   2247     frame_->Push(&answer);
   2248   } else {
   2249     // Call the runtime to instantiate the function boilerplate
   2250     // object.
   2251     frame_->EmitPush(rsi);
   2252     frame_->EmitPush(boilerplate);
   2253     Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
   2254     frame_->Push(&result);
   2255   }
   2256 }
   2257 
   2258 
   2259 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
   2260   Comment cmnt(masm_, "[ FunctionLiteral");
   2261 
   2262   // Build the function boilerplate and instantiate it.
   2263   Handle<JSFunction> boilerplate =
   2264       Compiler::BuildBoilerplate(node, script(), this);
   2265   // Check for stack-overflow exception.
   2266   if (HasStackOverflow()) return;
   2267   InstantiateBoilerplate(boilerplate);
   2268 }
   2269 
   2270 
   2271 void CodeGenerator::VisitFunctionBoilerplateLiteral(
   2272     FunctionBoilerplateLiteral* node) {
   2273   Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
   2274   InstantiateBoilerplate(node->boilerplate());
   2275 }
   2276 
   2277 
   2278 void CodeGenerator::VisitConditional(Conditional* node) {
   2279   Comment cmnt(masm_, "[ Conditional");
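          // Source pattern compiled here (illustrative): cond ? then_expr : else_expr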
   2280   JumpTarget then;
   2281   JumpTarget else_;
   2282   JumpTarget exit;
   2283   ControlDestination dest(&then, &else_, true);
   2284   LoadCondition(node->condition(), &dest, true);
   2285 
   2286   if (dest.false_was_fall_through()) {
   2287     // The else target was bound, so we compile the else part first.
   2288     Load(node->else_expression());
   2289 
   2290     if (then.is_linked()) {
   2291       exit.Jump();
   2292       then.Bind();
   2293       Load(node->then_expression());
   2294     }
   2295   } else {
   2296     // The then target was bound, so we compile the then part first.
   2297     Load(node->then_expression());
   2298 
   2299     if (else_.is_linked()) {
   2300       exit.Jump();
   2301       else_.Bind();
   2302       Load(node->else_expression());
   2303     }
   2304   }
   2305 
   2306   exit.Bind();
   2307 }
   2308 
   2309 
   2310 void CodeGenerator::VisitSlot(Slot* node) {
   2311   Comment cmnt(masm_, "[ Slot");
   2312   LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
   2313 }
   2314 
   2315 
   2316 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
   2317   Comment cmnt(masm_, "[ VariableProxy");
   2318   Variable* var = node->var();
   2319   Expression* expr = var->rewrite();
   2320   if (expr != NULL) {
   2321     Visit(expr);
   2322   } else {
   2323     ASSERT(var->is_global());
   2324     Reference ref(this, node);
   2325     ref.GetValue();
   2326   }
   2327 }
   2328 
   2329 
   2330 void CodeGenerator::VisitLiteral(Literal* node) {
   2331   Comment cmnt(masm_, "[ Literal");
   2332   frame_->Push(node->handle());
   2333 }
   2334 
   2335 
   2336 // Materialize the regexp literal 'node' in the literals array
   2337 // 'literals' of the function.  Leave the regexp boilerplate in
   2338 // 'boilerplate'.
   2339 class DeferredRegExpLiteral: public DeferredCode {
   2340  public:
   2341   DeferredRegExpLiteral(Register boilerplate,
   2342                         Register literals,
   2343                         RegExpLiteral* node)
   2344       : boilerplate_(boilerplate), literals_(literals), node_(node) {
   2345     set_comment("[ DeferredRegExpLiteral");
   2346   }
   2347 
   2348   void Generate();
   2349 
   2350  private:
   2351   Register boilerplate_;
   2352   Register literals_;
   2353   RegExpLiteral* node_;
   2354 };
   2355 
   2356 
   2357 void DeferredRegExpLiteral::Generate() {
   2358   // Since the entry is undefined, we call the runtime system to
   2359   // compute the literal.
   2360   // Literal array (0).
   2361   __ push(literals_);
   2362   // Literal index (1).
   2363   __ Push(Smi::FromInt(node_->literal_index()));
   2364   // RegExp pattern (2).
   2365   __ Push(node_->pattern());
   2366   // RegExp flags (3).
   2367   __ Push(node_->flags());
   2368   __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   2369   if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
   2370 }
   2371 
   2372 
   2373 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
   2374   Comment cmnt(masm_, "[ RegExp Literal");
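          // Source pattern compiled here (illustrative): /pattern/flags.  The
          // boilerplate is materialized lazily via DeferredRegExpLiteral when
          // the literals-array entry is still undefined.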
   2375 
   2376   // Retrieve the literals array and check the allocated entry.  Begin
   2377   // with a writable copy of the function of this activation in a
   2378   // register.
   2379   frame_->PushFunction();
   2380   Result literals = frame_->Pop();
   2381   literals.ToRegister();
   2382   frame_->Spill(literals.reg());
   2383 
   2384   // Load the literals array of the function.
   2385   __ movq(literals.reg(),
   2386           FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
   2387 
   2388   // Load the literal at the index saved in the AST node.
   2389   Result boilerplate = allocator_->Allocate();
   2390   ASSERT(boilerplate.is_valid());
   2391   int literal_offset =
   2392       FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   2393   __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
   2394 
   2395   // Check whether we need to materialize the RegExp object.  If so,
   2396   // jump to the deferred code passing the literals array.
   2397   DeferredRegExpLiteral* deferred =
   2398       new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
   2399   __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
   2400   deferred->Branch(equal);
   2401   deferred->BindExit();
   2402   literals.Unuse();
   2403 
   2404   // Push the boilerplate object.
   2405   frame_->Push(&boilerplate);
   2406 }
   2407 
   2408 
   2409 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
   2410   Comment cmnt(masm_, "[ ObjectLiteral");
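          // Source pattern compiled here (illustrative): { name: value, get p() { ... }, ... }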
   2411 
   2412   // Load a writable copy of the function of this activation in a
   2413   // register.
   2414   frame_->PushFunction();
   2415   Result literals = frame_->Pop();
   2416   literals.ToRegister();
   2417   frame_->Spill(literals.reg());
   2418 
   2419   // Load the literals array of the function.
   2420   __ movq(literals.reg(),
   2421           FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
   2422   // Literal array.
   2423   frame_->Push(&literals);
   2424   // Literal index.
   2425   frame_->Push(Smi::FromInt(node->literal_index()));
   2426   // Constant properties.
   2427   frame_->Push(node->constant_properties());
   2428   Result clone;
   2429   if (node->depth() > 1) {
   2430     clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
   2431   } else {
   2432     clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
   2433   }
   2434   frame_->Push(&clone);
   2435 
   2436   for (int i = 0; i < node->properties()->length(); i++) {
   2437     ObjectLiteral::Property* property = node->properties()->at(i);
   2438     switch (property->kind()) {
   2439       case ObjectLiteral::Property::CONSTANT:
   2440         break;
   2441       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
   2442         if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
   2443         // else fall through.
   2444       case ObjectLiteral::Property::COMPUTED: {
   2445         Handle<Object> key(property->key()->handle());
   2446         if (key->IsSymbol()) {
   2447           // Duplicate the object as the IC receiver.
   2448           frame_->Dup();
   2449           Load(property->value());
   2450           frame_->Push(key);
   2451           Result ignored = frame_->CallStoreIC();
   2452           break;
   2453         }
   2454         // Fall through
   2455       }
   2456       case ObjectLiteral::Property::PROTOTYPE: {
   2457         // Duplicate the object as an argument to the runtime call.
   2458         frame_->Dup();
   2459         Load(property->key());
   2460         Load(property->value());
   2461         Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
   2462         // Ignore the result.
   2463         break;
   2464       }
   2465       case ObjectLiteral::Property::SETTER: {
   2466         // Duplicate the object as an argument to the runtime call.
   2467         frame_->Dup();
   2468         Load(property->key());
   2469         frame_->Push(Smi::FromInt(1));
   2470         Load(property->value());
   2471         Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
   2472         // Ignore the result.
   2473         break;
   2474       }
   2475       case ObjectLiteral::Property::GETTER: {
   2476         // Duplicate the object as an argument to the runtime call.
   2477         frame_->Dup();
   2478         Load(property->key());
   2479         frame_->Push(Smi::FromInt(0));
   2480         Load(property->value());
   2481         Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
   2482         // Ignore the result.
   2483         break;
   2484       }
   2485       default: UNREACHABLE();
   2486     }
   2487   }
   2488 }
   2489 
   2490 
   2491 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
   2492   Comment cmnt(masm_, "[ ArrayLiteral");
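          // Source pattern compiled here (illustrative): [a, b, f(), ...]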
   2493 
   2494   // Load a writable copy of the function of this activation in a
   2495   // register.
   2496   frame_->PushFunction();
   2497   Result literals = frame_->Pop();
   2498   literals.ToRegister();
   2499   frame_->Spill(literals.reg());
   2500 
   2501   // Load the literals array of the function.
   2502   __ movq(literals.reg(),
   2503           FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
   2504 
   2505   frame_->Push(&literals);
   2506   frame_->Push(Smi::FromInt(node->literal_index()));
   2507   frame_->Push(node->constant_elements());
   2508   int length = node->values()->length();
   2509   Result clone;
   2510   if (node->depth() > 1) {
   2511     clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
   2512   } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
   2513     clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   2514   } else {
   2515     FastCloneShallowArrayStub stub(length);
   2516     clone = frame_->CallStub(&stub, 3);
   2517   }
   2518   frame_->Push(&clone);
   2519 
   2520   // Generate code to set the elements in the array that are not
   2521   // literals.
   2522   for (int i = 0; i < node->values()->length(); i++) {
   2523     Expression* value = node->values()->at(i);
   2524 
   2525     // If value is a literal the property value is already set in the
   2526     // boilerplate object.
   2527     if (value->AsLiteral() != NULL) continue;
   2528     // If value is a materialized literal the property value is already set
   2529     // in the boilerplate object if it is simple.
   2530     if (CompileTimeValue::IsCompileTimeValue(value)) continue;
   2531 
   2532     // The property must be set by generated code.
   2533     Load(value);
   2534 
   2535     // Get the property value off the stack.
   2536     Result prop_value = frame_->Pop();
   2537     prop_value.ToRegister();
   2538 
   2539     // Fetch the array literal while leaving a copy on the stack and
   2540     // use it to get the elements array.
   2541     frame_->Dup();
   2542     Result elements = frame_->Pop();
   2543     elements.ToRegister();
   2544     frame_->Spill(elements.reg());
   2545     // Get the elements FixedArray.
   2546     __ movq(elements.reg(),
   2547             FieldOperand(elements.reg(), JSObject::kElementsOffset));
   2548 
   2549     // Write to the indexed properties array.
   2550     int offset = i * kPointerSize + FixedArray::kHeaderSize;
   2551     __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
   2552 
   2553     // Update the write barrier for the array address.
   2554     frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
   2555     Result scratch = allocator_->Allocate();
   2556     ASSERT(scratch.is_valid());
   2557     __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
   2558   }
   2559 }
   2560 
   2561 
   2562 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
   2563   ASSERT(!in_spilled_code());
   2564   // Call runtime routine to allocate the catch extension object and
   2565   // assign the exception value to the catch variable.
   2566   Comment cmnt(masm_, "[ CatchExtensionObject");
   2567   Load(node->key());
   2568   Load(node->value());
   2569   Result result =
   2570       frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
   2571   frame_->Push(&result);
   2572 }
   2573 
   2574 
   2575 void CodeGenerator::VisitAssignment(Assignment* node) {
   2576   Comment cmnt(masm_, "[ Assignment");
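          // Handles plain assignments and initializations (e.g. 'x = y') as
          // well as compound assignments (e.g. 'x += y').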
   2577 
   2578   { Reference target(this, node->target(), node->is_compound());
   2579     if (target.is_illegal()) {
   2580       // Fool the virtual frame into thinking that we left the assignment's
   2581       // value on the frame.
   2582       frame_->Push(Smi::FromInt(0));
   2583       return;
   2584     }
   2585     Variable* var = node->target()->AsVariableProxy()->AsVariable();
   2586 
   2587     if (node->starts_initialization_block()) {
   2588       ASSERT(target.type() == Reference::NAMED ||
   2589              target.type() == Reference::KEYED);
   2590       // Change to slow case in the beginning of an initialization
   2591       // block to avoid the quadratic behavior of repeatedly adding
   2592       // fast properties.
   2593 
   2594       // The receiver is the argument to the runtime call.  It is the
   2595       // first value pushed when the reference was loaded to the
   2596       // frame.
   2597       frame_->PushElementAt(target.size() - 1);
   2598       Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
   2599     }
   2600     if (node->ends_initialization_block()) {
   2601       // Add an extra copy of the receiver to the frame, so that it can be
   2602       // converted back to fast case after the assignment.
   2603       ASSERT(target.type() == Reference::NAMED ||
   2604              target.type() == Reference::KEYED);
   2605       if (target.type() == Reference::NAMED) {
   2606         frame_->Dup();
   2607         // Dup target receiver on stack.
   2608       } else {
   2609         ASSERT(target.type() == Reference::KEYED);
   2610         Result temp = frame_->Pop();
   2611         frame_->Dup();
   2612         frame_->Push(&temp);
   2613       }
   2614     }
   2615     if (node->op() == Token::ASSIGN ||
   2616         node->op() == Token::INIT_VAR ||
   2617         node->op() == Token::INIT_CONST) {
   2618       Load(node->value());
   2619 
   2620     } else {  // Assignment is a compound assignment.
   2621       Literal* literal = node->value()->AsLiteral();
   2622       bool overwrite_value =
   2623           (node->value()->AsBinaryOperation() != NULL &&
   2624            node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
   2625       Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
   2626       // There are two cases where the target is not read in the right-hand
   2627       // side that are easy to test for: the right-hand side is a literal,
   2628       // or the right-hand side is a different variable.  TakeValue invalidates
   2629       // the target, with an implicit promise that it will be written to again
   2630       // before it is read.
   2631       if (literal != NULL || (right_var != NULL && right_var != var)) {
   2632         target.TakeValue();
   2633       } else {
   2634         target.GetValue();
   2635       }
   2636       Load(node->value());
   2637       GenericBinaryOperation(node->binary_op(),
   2638                              node->type(),
   2639                              overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
   2640     }
   2641 
   2642     if (var != NULL &&
   2643         var->mode() == Variable::CONST &&
   2644         node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
   2645       // Assignment ignored - leave the value on the stack.
   2646       UnloadReference(&target);
   2647     } else {
   2648       CodeForSourcePosition(node->position());
   2649       if (node->op() == Token::INIT_CONST) {
   2650         // Dynamic constant initializations must use the function context
   2651         // and initialize the actual constant declared. Dynamic variable
   2652         // initializations are simply assignments and use SetValue.
   2653         target.SetValue(CONST_INIT);
   2654       } else {
   2655         target.SetValue(NOT_CONST_INIT);
   2656       }
   2657       if (node->ends_initialization_block()) {
   2658         ASSERT(target.type() == Reference::UNLOADED);
   2659         // End of initialization block. Revert to fast case.  The
   2660         // argument to the runtime call is the extra copy of the receiver,
   2661         // which is below the value of the assignment.
   2662         // Swap the receiver and the value of the assignment expression.
   2663         Result lhs = frame_->Pop();
   2664         Result receiver = frame_->Pop();
   2665         frame_->Push(&lhs);
   2666         frame_->Push(&receiver);
   2667         Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
   2668       }
   2669     }
   2670   }
   2671 }
   2672 
   2673 
   2674 void CodeGenerator::VisitThrow(Throw* node) {
   2675   Comment cmnt(masm_, "[ Throw");
   2676   Load(node->exception());
   2677   Result result = frame_->CallRuntime(Runtime::kThrow, 1);
   2678   frame_->Push(&result);
   2679 }
   2680 
   2681 
   2682 void CodeGenerator::VisitProperty(Property* node) {
   2683   Comment cmnt(masm_, "[ Property");
   2684   Reference property(this, node);
   2685   property.GetValue();
   2686 }
   2687 
   2688 
   2689 void CodeGenerator::VisitCall(Call* node) {
   2690   Comment cmnt(masm_, "[ Call");
   2691 
   2692   ZoneList<Expression*>* args = node->arguments();
   2693 
   2694   // Check if the function is a variable or a property.
   2695   Expression* function = node->expression();
   2696   Variable* var = function->AsVariableProxy()->AsVariable();
   2697   Property* property = function->AsProperty();
   2698 
   2699   // ------------------------------------------------------------------------
   2700   // Fast-case: Use inline caching.
   2701   // ---
   2702   // According to ECMA-262, section 11.2.3, page 44, the function to call
   2703   // must be resolved after the arguments have been evaluated. The IC code
   2704   // automatically handles this by loading the arguments before the function
   2705   // is resolved in cache misses (this also holds for megamorphic calls).
   2706   // ------------------------------------------------------------------------
   2707 
   2708   if (var != NULL && var->is_possibly_eval()) {
   2709     // ----------------------------------
   2710     // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
   2711     // ----------------------------------
   2712 
   2713     // In a call to eval, we first call %ResolvePossiblyDirectEval to
   2714     // resolve the function we need to call and the receiver of the
   2715     // call.  Then we call the resolved function using the given
   2716     // arguments.
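            // (Illustrative example, not in the original source: for a call
            // written as 'eval(str)', the resolver decides at run time whether
            // 'eval' still refers to the builtin and, if so, the call behaves
            // as a direct eval in the caller's scope.)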
   2717 
   2718     // Prepare the stack for the call to the resolved function.
   2719     Load(function);
   2720 
   2721     // Allocate a frame slot for the receiver.
   2722     frame_->Push(Factory::undefined_value());
   2723     int arg_count = args->length();
   2724     for (int i = 0; i < arg_count; i++) {
   2725       Load(args->at(i));
   2726     }
   2727 
   2728     // Prepare the stack for the call to ResolvePossiblyDirectEval.
   2729     frame_->PushElementAt(arg_count + 1);
   2730     if (arg_count > 0) {
   2731       frame_->PushElementAt(arg_count);
   2732     } else {
   2733       frame_->Push(Factory::undefined_value());
   2734     }
   2735 
   2736     // Push the receiver.
   2737     frame_->PushParameterAt(-1);
   2738 
   2739     // Resolve the call.
   2740     Result result =
   2741         frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
   2742 
   2743     // The runtime call returns a pair of values in rax (function) and
   2744     // rdx (receiver). Touch up the stack with the right values.
   2745     Result receiver = allocator_->Allocate(rdx);
   2746     frame_->SetElementAt(arg_count + 1, &result);
   2747     frame_->SetElementAt(arg_count, &receiver);
   2748     receiver.Unuse();
   2749 
   2750     // Call the function.
   2751     CodeForSourcePosition(node->position());
   2752     InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
   2753     CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   2754     result = frame_->CallStub(&call_function, arg_count + 1);
   2755 
   2756     // Restore the context and overwrite the function on the stack with
   2757     // the result.
   2758     frame_->RestoreContextRegister();
   2759     frame_->SetElementAt(0, &result);
   2760 
   2761   } else if (var != NULL && !var->is_this() && var->is_global()) {
   2762     // ----------------------------------
   2763     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
   2764     // ----------------------------------
   2765 
   2766     // Pass the global object as the receiver and let the IC stub
   2767     // patch the stack to use the global proxy as 'this' in the
   2768     // invoked function.
   2769     LoadGlobal();
   2770 
   2771     // Load the arguments.
   2772     int arg_count = args->length();
   2773     for (int i = 0; i < arg_count; i++) {
   2774       Load(args->at(i));
   2775     }
   2776 
   2777     // Push the name of the function on the frame.
   2778     frame_->Push(var->name());
   2779 
   2780     // Call the IC initialization code.
   2781     CodeForSourcePosition(node->position());
   2782     Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
   2783                                        arg_count,
   2784                                        loop_nesting());
   2785     frame_->RestoreContextRegister();
   2786     // Replace the function on the stack with the result.
   2787     frame_->Push(&result);
   2788 
   2789   } else if (var != NULL && var->slot() != NULL &&
   2790              var->slot()->type() == Slot::LOOKUP) {
   2791     // ----------------------------------
   2792     // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
   2793     // ----------------------------------
   2794 
   2795     // Load the function from the context.  Sync the frame so we can
   2796     // push the arguments directly into place.
   2797     frame_->SyncRange(0, frame_->element_count() - 1);
   2798     frame_->EmitPush(rsi);
   2799     frame_->EmitPush(var->name());
   2800     frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
   2801     // The runtime call returns a pair of values in rax and rdx.  The
   2802     // looked-up function is in rax and the receiver is in rdx.  These
   2803     // register references are not ref counted here.  We spill them
   2804     // eagerly since they are arguments to an inevitable call (and are
   2805     // not sharable by the arguments).
   2806     ASSERT(!allocator()->is_used(rax));
   2807     frame_->EmitPush(rax);
   2808 
   2809     // Load the receiver.
   2810     ASSERT(!allocator()->is_used(rdx));
   2811     frame_->EmitPush(rdx);
   2812 
   2813     // Call the function.
   2814     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
   2815 
   2816   } else if (property != NULL) {
   2817     // Check if the key is a literal string.
   2818     Literal* literal = property->key()->AsLiteral();
   2819 
   2820     if (literal != NULL && literal->handle()->IsSymbol()) {
   2821       // ------------------------------------------------------------------
   2822       // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
   2823       // ------------------------------------------------------------------
   2824 
   2825       Handle<String> name = Handle<String>::cast(literal->handle());
   2826 
   2827       if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
   2828           name->IsEqualTo(CStrVector("apply")) &&
   2829           args->length() == 2 &&
   2830           args->at(1)->AsVariableProxy() != NULL &&
   2831           args->at(1)->AsVariableProxy()->IsArguments()) {
   2832         // Use the optimized Function.prototype.apply that avoids
   2833         // allocating lazily allocated arguments objects.
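                // (Illustrative, not in the original source: this pattern matches
                // calls written as 'f.apply(receiver, arguments)' inside a function
                // whose arguments object has not yet been materialized.)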
   2834         CallApplyLazy(property->obj(),
   2835                       args->at(0),
   2836                       args->at(1)->AsVariableProxy(),
   2837                       node->position());
   2838 
   2839       } else {
   2840         // Push the receiver onto the frame.
   2841         Load(property->obj());
   2842 
   2843         // Load the arguments.
   2844         int arg_count = args->length();
   2845         for (int i = 0; i < arg_count; i++) {
   2846           Load(args->at(i));
   2847         }
   2848 
   2849         // Push the name of the function onto the frame.
   2850         frame_->Push(name);
   2851 
   2852         // Call the IC initialization code.
   2853         CodeForSourcePosition(node->position());
   2854         Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
   2855                                            arg_count,
   2856                                            loop_nesting());
   2857         frame_->RestoreContextRegister();
   2858         frame_->Push(&result);
   2859       }
   2860 
   2861     } else {
   2862       // -------------------------------------------
   2863       // JavaScript example: 'array[index](1, 2, 3)'
   2864       // -------------------------------------------
   2865 
   2866       // Load the function to call from the property through a reference.
   2867       if (property->is_synthetic()) {
   2868         Reference ref(this, property, false);
   2869         ref.GetValue();
   2870         // Use global object as receiver.
   2871         LoadGlobalReceiver();
   2872       } else {
   2873         Reference ref(this, property, false);
   2874         ASSERT(ref.size() == 2);
   2875         Result key = frame_->Pop();
   2876         frame_->Dup();  // Duplicate the receiver.
   2877         frame_->Push(&key);
   2878         ref.GetValue();
   2879         // Top of frame contains function to call, with duplicate copy of
   2880         // receiver below it.  Swap them.
   2881         Result function = frame_->Pop();
   2882         Result receiver = frame_->Pop();
   2883         frame_->Push(&function);
   2884         frame_->Push(&receiver);
   2885       }
   2886 
   2887       // Call the function.
   2888       CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
   2889     }
   2890 
   2891   } else {
   2892     // ----------------------------------
   2893     // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
   2894     // ----------------------------------
   2895 
   2896     // Load the function.
   2897     Load(function);
   2898 
   2899     // Pass the global proxy as the receiver.
   2900     LoadGlobalReceiver();
   2901 
   2902     // Call the function.
   2903     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
   2904   }
   2905 }
   2906 
   2907 
   2908 void CodeGenerator::VisitCallNew(CallNew* node) {
   2909   Comment cmnt(masm_, "[ CallNew");
   2910 
   2911   // According to ECMA-262, section 11.2.2, page 44, the function
   2912   // expression in new calls must be evaluated before the
   2913   // arguments. This is different from ordinary calls, where the
   2914   // actual function to call is resolved after the arguments have been
   2915   // evaluated.
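          // (Illustrative example, not in the original source: in 'new f(g())'
          // the value of 'f' is fetched before 'g()' runs.)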
   2916 
   2917   // Compute function to call and use the global object as the
   2918   // receiver. There is no need to use the global proxy here because
   2919   // it will always be replaced with a newly allocated object.
   2920   Load(node->expression());
   2921   LoadGlobal();
   2922 
   2923   // Push the arguments ("left-to-right") on the stack.
   2924   ZoneList<Expression*>* args = node->arguments();
   2925   int arg_count = args->length();
   2926   for (int i = 0; i < arg_count; i++) {
   2927     Load(args->at(i));
   2928   }
   2929 
   2930   // Call the construct call builtin that handles allocation and
   2931   // constructor invocation.
   2932   CodeForSourcePosition(node->position());
   2933   Result result = frame_->CallConstructor(arg_count);
   2934   // Replace the function on the stack with the result.
   2935   frame_->SetElementAt(0, &result);
   2936 }
   2937 
   2938 
   2939 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
   2940   if (CheckForInlineRuntimeCall(node)) {
   2941     return;
   2942   }
   2943 
   2944   ZoneList<Expression*>* args = node->arguments();
   2945   Comment cmnt(masm_, "[ CallRuntime");
   2946   Runtime::Function* function = node->function();
   2947 
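          // (Illustrative note, an assumption not stated in the original source:
          // node->function() is NULL when the %-call names a function that is not
          // in the C++ runtime table; such calls are looked up on the builtins
          // object pushed below and invoked like ordinary JS calls.)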
   2948   if (function == NULL) {
   2949     // Push the builtins object found in the current global object.
   2950     Result temp = allocator()->Allocate();
   2951     ASSERT(temp.is_valid());
   2952     __ movq(temp.reg(), GlobalObject());
   2953     __ movq(temp.reg(),
   2954             FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
   2955     frame_->Push(&temp);
   2956   }
   2957 
   2958   // Push the arguments ("left-to-right").
   2959   int arg_count = args->length();
   2960   for (int i = 0; i < arg_count; i++) {
   2961     Load(args->at(i));
   2962   }
   2963 
   2964   if (function == NULL) {
   2965     // Call the JS runtime function.
   2966     frame_->Push(node->name());
   2967     Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
   2968                                        arg_count,
   2969                                        loop_nesting_);
   2970     frame_->RestoreContextRegister();
   2971     frame_->Push(&answer);
   2972   } else {
   2973     // Call the C runtime function.
   2974     Result answer = frame_->CallRuntime(function, arg_count);
   2975     frame_->Push(&answer);
   2976   }
   2977 }
   2978 
   2979 
   2980 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
   2981   Comment cmnt(masm_, "[ UnaryOperation");
   2982 
   2983   Token::Value op = node->op();
   2984 
   2985   if (op == Token::NOT) {
   2986     // Swap the true and false targets but keep the same actual label
   2987     // as the fall through.
   2988     destination()->Invert();
   2989     LoadCondition(node->expression(), destination(), true);
   2990     // Swap the labels back.
   2991     destination()->Invert();
   2992 
   2993   } else if (op == Token::DELETE) {
   2994     Property* property = node->expression()->AsProperty();
   2995     if (property != NULL) {
   2996       Load(property->obj());
   2997       Load(property->key());
   2998       Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
   2999       frame_->Push(&answer);
   3000       return;
   3001     }
   3002 
   3003     Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
   3004     if (variable != NULL) {
   3005       Slot* slot = variable->slot();
   3006       if (variable->is_global()) {
   3007         LoadGlobal();
   3008         frame_->Push(variable->name());
   3009         Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
   3010                                               CALL_FUNCTION, 2);
   3011         frame_->Push(&answer);
   3012         return;
   3013 
   3014       } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
   3015         // Call the runtime to look up the context holding the named
   3016         // variable.  Sync the virtual frame eagerly so we can push the
   3017         // arguments directly into place.
   3018         frame_->SyncRange(0, frame_->element_count() - 1);
   3019         frame_->EmitPush(rsi);
   3020         frame_->EmitPush(variable->name());
   3021         Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
   3022         ASSERT(context.is_register());
   3023         frame_->EmitPush(context.reg());
   3024         context.Unuse();
   3025         frame_->EmitPush(variable->name());
   3026         Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
   3027                                               CALL_FUNCTION, 2);
   3028         frame_->Push(&answer);
   3029         return;
   3030       }
   3031 
   3032       // Default: deleting a non-global variable that was not dynamically
   3033       // introduced yields false.
   3034       frame_->Push(Factory::false_value());
   3035 
   3036     } else {
   3037       // Default: Result of deleting expressions is true.
   3038       Load(node->expression());  // may have side-effects
   3039       frame_->SetElementAt(0, Factory::true_value());
   3040     }
   3041 
   3042   } else if (op == Token::TYPEOF) {
   3043     // Special case for loading the typeof expression; see comment on
   3044     // LoadTypeofExpression().
   3045     LoadTypeofExpression(node->expression());
   3046     Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
   3047     frame_->Push(&answer);
   3048 
   3049   } else if (op == Token::VOID) {
   3050     Expression* expression = node->expression();
   3051     if (expression && expression->AsLiteral() && (
   3052         expression->AsLiteral()->IsTrue() ||
   3053         expression->AsLiteral()->IsFalse() ||
   3054         expression->AsLiteral()->handle()->IsNumber() ||
   3055         expression->AsLiteral()->handle()->IsString() ||
   3056         expression->AsLiteral()->handle()->IsJSRegExp() ||
   3057         expression->AsLiteral()->IsNull())) {
   3058       // Omit evaluating the value of the primitive literal.
   3059       // It will be discarded anyway, and can have no side effect.
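              // (Illustrative, not in the original source: 'void 0' is the common
              // idiom this covers; the literal 0 is simply not evaluated.)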
   3060       frame_->Push(Factory::undefined_value());
   3061     } else {
   3062       Load(node->expression());
   3063       frame_->SetElementAt(0, Factory::undefined_value());
   3064     }
   3065 
   3066   } else {
   3067     bool overwrite =
   3068       (node->expression()->AsBinaryOperation() != NULL &&
   3069        node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
   3070     Load(node->expression());
   3071     switch (op) {
   3072       case Token::NOT:
   3073       case Token::DELETE:
   3074       case Token::TYPEOF:
   3075         UNREACHABLE();  // handled above
   3076         break;
   3077 
   3078       case Token::SUB: {
   3079         GenericUnaryOpStub stub(Token::SUB, overwrite);
   3080         Result operand = frame_->Pop();
   3081         Result answer = frame_->CallStub(&stub, &operand);
   3082         frame_->Push(&answer);
   3083         break;
   3084       }
   3085 
   3086       case Token::BIT_NOT: {
   3087         // Smi check.
   3088         JumpTarget smi_label;
   3089         JumpTarget continue_label;
   3090         Result operand = frame_->Pop();
   3091         operand.ToRegister();
   3092 
   3093         Condition is_smi = masm_->CheckSmi(operand.reg());
   3094         smi_label.Branch(is_smi, &operand);
   3095 
   3096         GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
   3097         Result answer = frame_->CallStub(&stub, &operand);
   3098         continue_label.Jump(&answer);
   3099 
   3100         smi_label.Bind(&answer);
   3101         answer.ToRegister();
   3102         frame_->Spill(answer.reg());
   3103         __ SmiNot(answer.reg(), answer.reg());
   3104         continue_label.Bind(&answer);
   3105         frame_->Push(&answer);
   3106         break;
   3107       }
   3108 
   3109       case Token::ADD: {
   3110         // Smi check.
   3111         JumpTarget continue_label;
   3112         Result operand = frame_->Pop();
   3113         operand.ToRegister();
   3114         Condition is_smi = masm_->CheckSmi(operand.reg());
   3115         continue_label.Branch(is_smi, &operand);
   3116         frame_->Push(&operand);
   3117         Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
   3118                                               CALL_FUNCTION, 1);
   3119 
   3120         continue_label.Bind(&answer);
   3121         frame_->Push(&answer);
   3122         break;
   3123       }
   3124 
   3125       default:
   3126         UNREACHABLE();
   3127     }
   3128   }
   3129 }
   3130 
   3131 
   3132 // The value in dst was optimistically incremented or decremented.  The
   3133 // result overflowed or was not smi tagged.  Undo the operation, call
   3134 // into the runtime to convert the argument to a number, and call the
   3135 // specialized add or subtract stub.  The result is left in dst.
   3136 class DeferredPrefixCountOperation: public DeferredCode {
   3137  public:
   3138   DeferredPrefixCountOperation(Register dst, bool is_increment)
   3139       : dst_(dst), is_increment_(is_increment) {
   3140     set_comment("[ DeferredCountOperation");
   3141   }
   3142 
   3143   virtual void Generate();
   3144 
   3145  private:
   3146   Register dst_;
   3147   bool is_increment_;
   3148 };
   3149 
   3150 
   3151 void DeferredPrefixCountOperation::Generate() {
   3152   __ push(dst_);
   3153   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
   3154   __ push(rax);
   3155   __ Push(Smi::FromInt(1));
   3156   if (is_increment_) {
   3157     __ CallRuntime(Runtime::kNumberAdd, 2);
   3158   } else {
   3159     __ CallRuntime(Runtime::kNumberSub, 2);
   3160   }
   3161   if (!dst_.is(rax)) __ movq(dst_, rax);
   3162 }
   3163 
   3164 
   3165 // The value in dst was optimistically incremented or decremented.  The
   3166 // result overflowed or was not smi tagged.  Undo the operation and call
   3167 // into the runtime to convert the argument to a number.  Update the
   3168 // original value in old.  Call the specialized add or subtract stub.
   3169 // The result is left in dst.
   3170 class DeferredPostfixCountOperation: public DeferredCode {
   3171  public:
   3172   DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
   3173       : dst_(dst), old_(old), is_increment_(is_increment) {
   3174     set_comment("[ DeferredCountOperation");
   3175   }
   3176 
   3177   virtual void Generate();
   3178 
   3179  private:
   3180   Register dst_;
   3181   Register old_;
   3182   bool is_increment_;
   3183 };
   3184 
   3185 
   3186 void DeferredPostfixCountOperation::Generate() {
   3187   __ push(dst_);
   3188   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
   3189 
   3190   // Save the result of ToNumber to use as the old value.
   3191   __ push(rax);
   3192 
   3193   // Call the runtime for the addition or subtraction.
   3194   __ push(rax);
   3195   __ Push(Smi::FromInt(1));
   3196   if (is_increment_) {
   3197     __ CallRuntime(Runtime::kNumberAdd, 2);
   3198   } else {
   3199     __ CallRuntime(Runtime::kNumberSub, 2);
   3200   }
   3201   if (!dst_.is(rax)) __ movq(dst_, rax);
   3202   __ pop(old_);
   3203 }
   3204 
   3205 
   3206 void CodeGenerator::VisitCountOperation(CountOperation* node) {
   3207   Comment cmnt(masm_, "[ CountOperation");
   3208 
   3209   bool is_postfix = node->is_postfix();
   3210   bool is_increment = node->op() == Token::INC;
   3211 
   3212   Variable* var = node->expression()->AsVariableProxy()->AsVariable();
   3213   bool is_const = (var != NULL && var->mode() == Variable::CONST);
   3214 
   3215   // Postfix operations need a stack slot under the reference to hold
   3216   // the old value while the new value is being stored.  This is so that
   3217   // in the case that storing the new value requires a call, the old
   3218   // value will be in the frame to be spilled.
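          // (Illustrative example, not in the original source: in 'y = x++' the
          // value assigned to y is the old value of x, which must survive the
          // store of the incremented value back into x.)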
   3219   if (is_postfix) frame_->Push(Smi::FromInt(0));
   3220 
   3221   // A constant reference is not saved to, so the reference is not a
   3222   // compound assignment reference.
   3223   { Reference target(this, node->expression(), !is_const);
   3224     if (target.is_illegal()) {
   3225       // Spoof the virtual frame to have the expected height (one higher
   3226       // than on entry).
   3227       if (!is_postfix) frame_->Push(Smi::FromInt(0));
   3228       return;
   3229     }
   3230     target.TakeValue();
   3231 
   3232     Result new_value = frame_->Pop();
   3233     new_value.ToRegister();
   3234 
   3235     Result old_value;  // Only allocated in the postfix case.
   3236     if (is_postfix) {
   3237       // Allocate a temporary to preserve the old value.
   3238       old_value = allocator_->Allocate();
   3239       ASSERT(old_value.is_valid());
   3240       __ movq(old_value.reg(), new_value.reg());
   3241     }
   3242     // Ensure the new value is writable.
   3243     frame_->Spill(new_value.reg());
   3244 
   3245     DeferredCode* deferred = NULL;
   3246     if (is_postfix) {
   3247       deferred = new DeferredPostfixCountOperation(new_value.reg(),
   3248                                                    old_value.reg(),
   3249                                                    is_increment);
   3250     } else {
   3251       deferred = new DeferredPrefixCountOperation(new_value.reg(),
   3252                                                   is_increment);
   3253     }
   3254 
   3255     __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
   3256     if (is_increment) {
   3257       __ SmiAddConstant(kScratchRegister,
   3258                         new_value.reg(),
   3259                         Smi::FromInt(1),
   3260                         deferred->entry_label());
   3261     } else {
   3262       __ SmiSubConstant(kScratchRegister,
   3263                         new_value.reg(),
   3264                         Smi::FromInt(1),
   3265                         deferred->entry_label());
   3266     }
   3267     __ movq(new_value.reg(), kScratchRegister);
   3268     deferred->BindExit();
   3269 
   3270     // Postfix: store the old value in the allocated slot under the
   3271     // reference.
   3272     if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
   3273 
   3274     frame_->Push(&new_value);
   3275     // Non-constant: update the reference.
   3276     if (!is_const) target.SetValue(NOT_CONST_INIT);
   3277   }
   3278 
   3279   // Postfix: drop the new value and use the old.
   3280   if (is_postfix) frame_->Drop();
   3281 }
   3282 
   3283 
   3284 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
   3285   // TODO(X64): This code was copied verbatim from codegen-ia32.
   3286   //     Either find a reason to change it or move it to a shared location.
   3287 
   3288   Comment cmnt(masm_, "[ BinaryOperation");
   3289   Token::Value op = node->op();
   3290 
   3291   // According to ECMA-262 section 11.11, page 58, the binary logical
   3292   // operators must yield the result of one of the two expressions
   3293   // before any ToBoolean() conversions. This means that the value
   3294   // produced by a && or || operator is not necessarily a boolean.
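          // (Illustrative examples, not in the original source: '0 || "default"'
          // yields the string "default", and '"a" && 0' yields 0.)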
   3295 
   3296   // NOTE: If the left hand side produces a materialized value (not
   3297   // control flow), we force the right hand side to do the same. This
   3298   // is necessary because we assume that if we get control flow on the
   3299   // last path out of an expression we got it on all paths.
   3300   if (op == Token::AND) {
   3301     JumpTarget is_true;
   3302     ControlDestination dest(&is_true, destination()->false_target(), true);
   3303     LoadCondition(node->left(), &dest, false);
   3304 
   3305     if (dest.false_was_fall_through()) {
   3306       // The current false target was used as the fall-through.  If
   3307       // there are no dangling jumps to is_true then the left
   3308       // subexpression was unconditionally false.  Otherwise we have
   3309       // paths where we do have to evaluate the right subexpression.
   3310       if (is_true.is_linked()) {
   3311         // We need to compile the right subexpression.  If the jump to
   3312         // the current false target was a forward jump then we have a
   3313         // valid frame, we have just bound the false target, and we
   3314         // have to jump around the code for the right subexpression.
   3315         if (has_valid_frame()) {
   3316           destination()->false_target()->Unuse();
   3317           destination()->false_target()->Jump();
   3318         }
   3319         is_true.Bind();
   3320         // The left subexpression compiled to control flow, so the
   3321         // right one is free to do so as well.
   3322         LoadCondition(node->right(), destination(), false);
   3323       } else {
   3324         // We have actually just jumped to or bound the current false
   3325         // target but the current control destination is not marked as
   3326         // used.
   3327         destination()->Use(false);
   3328       }
   3329 
   3330     } else if (dest.is_used()) {
   3331       // The left subexpression compiled to control flow (and is_true
   3332       // was just bound), so the right is free to do so as well.
   3333       LoadCondition(node->right(), destination(), false);
   3334 
   3335     } else {
   3336       // We have a materialized value on the frame, so we exit with
   3337       // one on all paths.  There are possibly also jumps to is_true
   3338       // from nested subexpressions.
   3339       JumpTarget pop_and_continue;
   3340       JumpTarget exit;
   3341 
   3342       // Avoid popping the result if it converts to 'false' using the
   3343       // standard ToBoolean() conversion as described in ECMA-262,
   3344       // section 9.2, page 30.
   3345       //
   3346       // Duplicate the TOS value. The duplicate will be popped by
   3347       // ToBoolean.
   3348       frame_->Dup();
   3349       ControlDestination dest(&pop_and_continue, &exit, true);
   3350       ToBoolean(&dest);
   3351 
   3352       // Pop the result of evaluating the first part.
   3353       frame_->Drop();
   3354 
   3355       // Compile right side expression.
   3356       is_true.Bind();
   3357       Load(node->right());
   3358 
   3359       // Exit (always with a materialized value).
   3360       exit.Bind();
   3361     }
   3362 
   3363   } else if (op == Token::OR) {
   3364     JumpTarget is_false;
   3365     ControlDestination dest(destination()->true_target(), &is_false, false);
   3366     LoadCondition(node->left(), &dest, false);
   3367 
   3368     if (dest.true_was_fall_through()) {
   3369       // The current true target was used as the fall-through.  If
   3370       // there are no dangling jumps to is_false then the left
   3371       // subexpression was unconditionally true.  Otherwise we have
   3372       // paths where we do have to evaluate the right subexpression.
   3373       if (is_false.is_linked()) {
   3374         // We need to compile the right subexpression.  If the jump to
   3375         // the current true target was a forward jump then we have a
   3376         // valid frame, we have just bound the true target, and we
   3377         // have to jump around the code for the right subexpression.
   3378         if (has_valid_frame()) {
   3379           destination()->true_target()->Unuse();
   3380           destination()->true_target()->Jump();
   3381         }
   3382         is_false.Bind();
   3383         // The left subexpression compiled to control flow, so the
   3384         // right one is free to do so as well.
   3385         LoadCondition(node->right(), destination(), false);
   3386       } else {
   3387         // We have just jumped to or bound the current true target but
   3388         // the current control destination is not marked as used.
   3389         destination()->Use(true);
   3390       }
   3391 
   3392     } else if (dest.is_used()) {
   3393       // The left subexpression compiled to control flow (and is_false
   3394       // was just bound), so the right is free to do so as well.
   3395       LoadCondition(node->right(), destination(), false);
   3396 
   3397     } else {
   3398       // We have a materialized value on the frame, so we exit with
   3399       // one on all paths.  There are possibly also jumps to is_false
   3400       // from nested subexpressions.
   3401       JumpTarget pop_and_continue;
   3402       JumpTarget exit;
   3403 
   3404       // Avoid popping the result if it converts to 'true' using the
   3405       // standard ToBoolean() conversion as described in ECMA-262,
   3406       // section 9.2, page 30.
   3407       //
   3408       // Duplicate the TOS value. The duplicate will be popped by
   3409       // ToBoolean.
   3410       frame_->Dup();
   3411       ControlDestination dest(&exit, &pop_and_continue, false);
   3412       ToBoolean(&dest);
   3413 
   3414       // Pop the result of evaluating the first part.
   3415       frame_->Drop();
   3416 
   3417       // Compile right side expression.
   3418       is_false.Bind();
   3419       Load(node->right());
   3420 
   3421       // Exit (always with a materialized value).
   3422       exit.Bind();
   3423     }
   3424 
   3425   } else {
   3426     // NOTE: The code below assumes that the slow cases (calls to runtime)
   3427     // never return a constant/immutable object.
   3428     OverwriteMode overwrite_mode = NO_OVERWRITE;
   3429     if (node->left()->AsBinaryOperation() != NULL &&
   3430         node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
   3431       overwrite_mode = OVERWRITE_LEFT;
   3432     } else if (node->right()->AsBinaryOperation() != NULL &&
   3433                node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
   3434       overwrite_mode = OVERWRITE_RIGHT;
   3435     }
   3436 
   3437     Load(node->left());
   3438     Load(node->right());
   3439     GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
   3440   }
   3441 }
   3442 
   3443 
   3445 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
   3446   Comment cmnt(masm_, "[ CompareOperation");
   3447 
   3448   // Get the expressions from the node.
   3449   Expression* left = node->left();
   3450   Expression* right = node->right();
   3451   Token::Value op = node->op();
   3452   // To make typeof testing for natives implemented in JavaScript really
   3453   // efficient, we generate special code for expressions of the form:
   3454   // 'typeof <expression> == <string>'.
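          // (Illustrative, not in the original source: e.g. 'typeof x == "function"'
          // or 'typeof v === "undefined"'.)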
   3455   UnaryOperation* operation = left->AsUnaryOperation();
   3456   if ((op == Token::EQ || op == Token::EQ_STRICT) &&
   3457       (operation != NULL && operation->op() == Token::TYPEOF) &&
   3458       (right->AsLiteral() != NULL &&
   3459        right->AsLiteral()->handle()->IsString())) {
   3460     Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
   3461 
   3462     // Load the operand and move it to a register.
   3463     LoadTypeofExpression(operation->expression());
   3464     Result answer = frame_->Pop();
   3465     answer.ToRegister();
   3466 
   3467     if (check->Equals(Heap::number_symbol())) {
   3468       Condition is_smi = masm_->CheckSmi(answer.reg());
   3469       destination()->true_target()->Branch(is_smi);
   3470       frame_->Spill(answer.reg());
   3471       __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
   3472       __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
   3473       answer.Unuse();
   3474       destination()->Split(equal);
   3475 
   3476     } else if (check->Equals(Heap::string_symbol())) {
   3477       Condition is_smi = masm_->CheckSmi(answer.reg());
   3478       destination()->false_target()->Branch(is_smi);
   3479 
   3480       // It can be an undetectable string object.
   3481       __ movq(kScratchRegister,
   3482               FieldOperand(answer.reg(), HeapObject::kMapOffset));
   3483       __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
   3484                Immediate(1 << Map::kIsUndetectable));
   3485       destination()->false_target()->Branch(not_zero);
   3486       __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
   3487       answer.Unuse();
   3488       destination()->Split(below);  // Unsigned byte comparison needed.
   3489 
   3490     } else if (check->Equals(Heap::boolean_symbol())) {
   3491       __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
   3492       destination()->true_target()->Branch(equal);
   3493       __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
   3494       answer.Unuse();
   3495       destination()->Split(equal);
   3496 
   3497     } else if (check->Equals(Heap::undefined_symbol())) {
   3498       __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
   3499       destination()->true_target()->Branch(equal);
   3500 
   3501       Condition is_smi = masm_->CheckSmi(answer.reg());
   3502       destination()->false_target()->Branch(is_smi);
   3503 
   3504       // It can be an undetectable object.
   3505       __ movq(kScratchRegister,
   3506               FieldOperand(answer.reg(), HeapObject::kMapOffset));
   3507       __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
   3508                Immediate(1 << Map::kIsUndetectable));
   3509       answer.Unuse();
   3510       destination()->Split(not_zero);
   3511 
   3512     } else if (check->Equals(Heap::function_symbol())) {
   3513       Condition is_smi = masm_->CheckSmi(answer.reg());
   3514       destination()->false_target()->Branch(is_smi);
   3515       frame_->Spill(answer.reg());
   3516       __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
   3517       destination()->true_target()->Branch(equal);
   3518       // Regular expressions are callable so typeof == 'function'.
   3519       __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
   3520       answer.Unuse();
   3521       destination()->Split(equal);
   3522 
   3523     } else if (check->Equals(Heap::object_symbol())) {
   3524       Condition is_smi = masm_->CheckSmi(answer.reg());
   3525       destination()->false_target()->Branch(is_smi);
   3526       __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
   3527       destination()->true_target()->Branch(equal);
   3528 
   3529       // Regular expressions are typeof == 'function', not 'object'.
   3530       __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
   3531       destination()->false_target()->Branch(equal);
   3532 
   3533       // It can be an undetectable object.
   3534       __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
   3535                Immediate(1 << Map::kIsUndetectable));
   3536       destination()->false_target()->Branch(not_zero);
   3537       __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
   3538       destination()->false_target()->Branch(below);
   3539       __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
   3540       answer.Unuse();
   3541       destination()->Split(below_equal);
   3542     } else {
   3543       // Uncommon case: typeof testing against a string literal that is
   3544       // never returned from the typeof operator.
   3545       answer.Unuse();
   3546       destination()->Goto(false);
   3547     }
   3548     return;
   3549   }
   3550 
   3551   Condition cc = no_condition;
   3552   bool strict = false;
   3553   switch (op) {
   3554     case Token::EQ_STRICT:
   3555       strict = true;
   3556       // Fall through
   3557     case Token::EQ:
   3558       cc = equal;
   3559       break;
   3560     case Token::LT:
   3561       cc = less;
   3562       break;
   3563     case Token::GT:
   3564       cc = greater;
   3565       break;
   3566     case Token::LTE:
   3567       cc = less_equal;
   3568       break;
   3569     case Token::GTE:
   3570       cc = greater_equal;
   3571       break;
   3572     case Token::IN: {
   3573       Load(left);
   3574       Load(right);
   3575       Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
   3576       frame_->Push(&answer);  // push the result
   3577       return;
   3578     }
   3579     case Token::INSTANCEOF: {
   3580       Load(left);
   3581       Load(right);
   3582       InstanceofStub stub;
   3583       Result answer = frame_->CallStub(&stub, 2);
   3584       answer.ToRegister();
   3585       __ testq(answer.reg(), answer.reg());
   3586       answer.Unuse();
   3587       destination()->Split(zero);
   3588       return;
   3589     }
   3590     default:
   3591       UNREACHABLE();
   3592   }
   3593   Load(left);
   3594   Load(right);
   3595   Comparison(node, cc, strict, destination());
   3596 }
   3597 
   3598 
   3599 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
   3600   frame_->PushFunction();
   3601 }
   3602 
   3603 
   3604 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
   3605   ASSERT(args->length() == 1);
   3606 
   3607   // ArgumentsAccessStub expects the key in rdx and the formal
   3608   // parameter count in rax.
   3609   Load(args->at(0));
   3610   Result key = frame_->Pop();
   3611   // Explicitly create a constant result.
   3612   Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   3613   // Call the shared stub to get to arguments[key].
   3614   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   3615   Result result = frame_->CallStub(&stub, &key, &count);
   3616   frame_->Push(&result);
   3617 }
   3618 
   3619 
   3620 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
   3621   ASSERT(args->length() == 1);
   3622   Load(args->at(0));
   3623   Result value = frame_->Pop();
   3624   value.ToRegister();
   3625   ASSERT(value.is_valid());
   3626   Condition is_smi = masm_->CheckSmi(value.reg());
   3627   destination()->false_target()->Branch(is_smi);
   3628   // It is a heap object - get map.
   3629   // Check if the object is a JS array or not.
   3630   __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
   3631   value.Unuse();
   3632   destination()->Split(equal);
   3633 }
   3634 
   3635 
   3636 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
   3637   ASSERT(args->length() == 1);
   3638   Load(args->at(0));
   3639   Result value = frame_->Pop();
   3640   value.ToRegister();
   3641   ASSERT(value.is_valid());
   3642   Condition is_smi = masm_->CheckSmi(value.reg());
   3643   destination()->false_target()->Branch(is_smi);
   3644   // It is a heap object - get map.
   3645   // Check if the object is a regexp.
   3646   __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
   3647   value.Unuse();
   3648   destination()->Split(equal);
   3649 }
   3650 
   3651 
   3652 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
   3653   // This generates a fast version of:
   3654   // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
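          // (Illustrative note, not in the original source: null passes this test,
          // matching 'typeof null == "object"'.)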
   3655   ASSERT(args->length() == 1);
   3656   Load(args->at(0));
   3657   Result obj = frame_->Pop();
   3658   obj.ToRegister();
   3659   Condition is_smi = masm_->CheckSmi(obj.reg());
   3660   destination()->false_target()->Branch(is_smi);
   3661 
   3662   __ Move(kScratchRegister, Factory::null_value());
   3663   __ cmpq(obj.reg(), kScratchRegister);
   3664   destination()->true_target()->Branch(equal);
   3665 
   3666   __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
   3667   // Undetectable objects behave like undefined when tested with typeof.
   3668   __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
   3669           Immediate(1 << Map::kIsUndetectable));
   3670   destination()->false_target()->Branch(not_zero);
   3671   __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
   3672   destination()->false_target()->Branch(less);
   3673   __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
   3674   obj.Unuse();
   3675   destination()->Split(less_equal);
   3676 }
   3677 
   3678 
   3679 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
   3680   // This generates a fast version of:
   3681   // (%_ClassOf(arg) === 'Function')
   3682   ASSERT(args->length() == 1);
   3683   Load(args->at(0));
   3684   Result obj = frame_->Pop();
   3685   obj.ToRegister();
   3686   Condition is_smi = masm_->CheckSmi(obj.reg());
   3687   destination()->false_target()->Branch(is_smi);
   3688   __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
   3689   obj.Unuse();
   3690   destination()->Split(equal);
   3691 }
   3692 
   3693 
   3694 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
   3695   ASSERT(args->length() == 1);
   3696   Load(args->at(0));
   3697   Result obj = frame_->Pop();
   3698   obj.ToRegister();
   3699   Condition is_smi = masm_->CheckSmi(obj.reg());
   3700   destination()->false_target()->Branch(is_smi);
   3701   __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
   3702   __ movzxbl(kScratchRegister,
   3703              FieldOperand(kScratchRegister, Map::kBitFieldOffset));
   3704   __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
   3705   obj.Unuse();
   3706   destination()->Split(not_zero);
   3707 }
   3708 
   3709 
   3710 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   3711   ASSERT(args->length() == 0);
   3712 
   3713   // Get the frame pointer for the calling frame.
   3714   Result fp = allocator()->Allocate();
   3715   __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   3716 
   3717   // Skip the arguments adaptor frame if it exists.
   3718   Label check_frame_marker;
   3719   __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
   3720                 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   3721   __ j(not_equal, &check_frame_marker);
   3722   __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
   3723 
   3724   // Check the marker in the calling frame.
   3725   __ bind(&check_frame_marker);
   3726   __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
   3727                 Smi::FromInt(StackFrame::CONSTRUCT));
   3728   fp.Unuse();
   3729   destination()->Split(equal);
   3730 }
   3731 
   3732 
   3733 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
   3734   ASSERT(args->length() == 0);
   3735   // ArgumentsAccessStub takes the parameter count as an input argument
   3736   // in register rax.  Create a constant result for it.
   3737   Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   3738   // Call the shared stub to get to the arguments.length.
   3739   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
   3740   Result result = frame_->CallStub(&stub, &count);
   3741   frame_->Push(&result);
   3742 }
   3743 
   3744 
   3745 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
   3746   Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
   3747   ASSERT(args->length() == 2);
   3748 
   3749   Label slow_case;
   3750   Label end;
   3751   Label not_a_flat_string;
   3752   Label try_again_with_new_string;
   3753   Label ascii_string;
   3754   Label got_char_code;
   3755 
   3756   Load(args->at(0));
   3757   Load(args->at(1));
   3758   Result index = frame_->Pop();
   3759   Result object = frame_->Pop();
   3760 
   3761   // Get register rcx to use as shift amount later.
   3762   Result shift_amount;
   3763   if (object.is_register() && object.reg().is(rcx)) {
   3764     Result fresh = allocator_->Allocate();
   3765     shift_amount = object;
   3766     object = fresh;
   3767     __ movq(object.reg(), rcx);
   3768   }
   3769   if (index.is_register() && index.reg().is(rcx)) {
   3770     Result fresh = allocator_->Allocate();
   3771     shift_amount = index;
   3772     index = fresh;
   3773     __ movq(index.reg(), rcx);
   3774   }
   3775   // There could be references to rcx in the frame. Allocating will
   3776   // spill them, otherwise spill explicitly.
   3777   if (shift_amount.is_valid()) {
   3778     frame_->Spill(rcx);
   3779   } else {
   3780     shift_amount = allocator()->Allocate(rcx);
   3781   }
   3782   ASSERT(shift_amount.is_register());
   3783   ASSERT(shift_amount.reg().is(rcx));
   3784   ASSERT(allocator_->count(rcx) == 1);
   3785 
   3786   // We will mutate the index register and possibly the object register.
   3787   // The case where they are somehow the same register is handled
   3788   // because we only mutate them in the case where the receiver is a
   3789   // heap object and the index is not.
   3790   object.ToRegister();
   3791   index.ToRegister();
   3792   frame_->Spill(object.reg());
   3793   frame_->Spill(index.reg());
   3794 
   3795   // We need a single extra temporary register.
   3796   Result temp = allocator()->Allocate();
   3797   ASSERT(temp.is_valid());
   3798 
   3799   // There is no virtual frame effect from here up to the final result
   3800   // push.
   3801 
   3802   // If the receiver is a smi trigger the slow case.
   3803   __ JumpIfSmi(object.reg(), &slow_case);
   3804 
   3805   // If the index is negative or non-smi trigger the slow case.
   3806   __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
   3807 
   3808   // Untag the index.
   3809   __ SmiToInteger32(index.reg(), index.reg());
   3810 
   3811   __ bind(&try_again_with_new_string);
   3812   // Fetch the instance type of the receiver into rcx.
   3813   __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
   3814   __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
   3815   // If the receiver is not a string trigger the slow case.
   3816   __ testb(rcx, Immediate(kIsNotStringMask));
   3817   __ j(not_zero, &slow_case);
   3818 
   3819   // Check for index out of range.
   3820   __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
   3821   __ j(greater_equal, &slow_case);
   3822   // Reload the instance type (into the temp register this time).
   3823   __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
   3824   __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
   3825 
   3826   // We need special handling for non-flat strings.
   3827   ASSERT_EQ(0, kSeqStringTag);
   3828   __ testb(temp.reg(), Immediate(kStringRepresentationMask));
   3829   __ j(not_zero, &not_a_flat_string);
   3830   // Check for 1-byte or 2-byte string.
   3831   ASSERT_EQ(0, kTwoByteStringTag);
   3832   __ testb(temp.reg(), Immediate(kStringEncodingMask));
   3833   __ j(not_zero, &ascii_string);
   3834 
   3835   // 2-byte string.
   3836   // Load the 2-byte character code into the temp register.
   3837   __ movzxwl(temp.reg(), FieldOperand(object.reg(),
   3838                                       index.reg(),
   3839                                       times_2,
   3840                                       SeqTwoByteString::kHeaderSize));
   3841   __ jmp(&got_char_code);
   3842 
   3843   // ASCII string.
   3844   __ bind(&ascii_string);
   3845   // Load the byte into the temp register.
   3846   __ movzxbl(temp.reg(), FieldOperand(object.reg(),
   3847                                       index.reg(),
   3848                                       times_1,
   3849                                       SeqAsciiString::kHeaderSize));
   3850   __ bind(&got_char_code);
   3851   __ Integer32ToSmi(temp.reg(), temp.reg());
   3852   __ jmp(&end);
   3853 
   3854   // Handle non-flat strings.
   3855   __ bind(&not_a_flat_string);
   3856   __ and_(temp.reg(), Immediate(kStringRepresentationMask));
   3857   __ cmpb(temp.reg(), Immediate(kConsStringTag));
   3858   __ j(not_equal, &slow_case);
   3859 
   3860   // ConsString.
   3861   // Check that the right hand side is the empty string (i.e. if this is really a
   3862   // flat string in a cons string).  If that is not the case we would rather go
   3863   // to the runtime system now, to flatten the string.
   3864   __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
   3865   __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
   3866   __ j(not_equal, &slow_case);
   3867   // Get the first of the two strings.
   3868   __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
   3869   __ jmp(&try_again_with_new_string);
   3870 
   3871   __ bind(&slow_case);
   3872   // Move the undefined value into the result register, which will
   3873   // trigger the slow case.
   3874   __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
   3875 
   3876   __ bind(&end);
   3877   frame_->Push(&temp);
   3878 }
   3879 
   3880 
   3881 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
   3882   ASSERT(args->length() == 1);
   3883   Load(args->at(0));
   3884   Result value = frame_->Pop();
   3885   value.ToRegister();
   3886   ASSERT(value.is_valid());
   3887   Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
   3888   value.Unuse();
   3889   destination()->Split(positive_smi);
   3890 }
   3891 
   3892 
   3893 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
   3894   ASSERT(args->length() == 1);
   3895   Load(args->at(0));
   3896   Result value = frame_->Pop();
   3897   value.ToRegister();
   3898   ASSERT(value.is_valid());
   3899   Condition is_smi = masm_->CheckSmi(value.reg());
   3900   value.Unuse();
   3901   destination()->Split(is_smi);
   3902 }
   3903 
   3904 
   3905 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
   3906   // Conditionally generate a log call.
   3907   // Args:
   3908   //   0 (literal string): The type of logging (corresponds to the flags).
   3909   //     This is used to determine whether or not to generate the log call.
   3910   //   1 (string): Format string.  Access the string at argument index 2
   3911   //     with '%2s' (see Logger::LogRuntime for all the formats).
   3912   //   2 (array): Arguments to the format string.
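          // (Hypothetical illustration based on the description above, not from the
          // original source: a call like %_Log('mytype', 'value: %2s', [x]) would
          // only emit the runtime log call when logging of 'mytype' is enabled.)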
   3913   ASSERT_EQ(args->length(), 3);
   3914 #ifdef ENABLE_LOGGING_AND_PROFILING
   3915   if (ShouldGenerateLog(args->at(0))) {
   3916     Load(args->at(1));
   3917     Load(args->at(2));
   3918     frame_->CallRuntime(Runtime::kLog, 2);
   3919   }
   3920 #endif
   3921   // Finally, we're expected to leave a value on the top of the stack.
   3922   frame_->Push(Factory::undefined_value());
   3923 }
   3924 
   3925 
   3926 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
   3927   ASSERT(args->length() == 2);
   3928 
   3929   // Load the two objects into registers and perform the comparison.
   3930   Load(args->at(0));
   3931   Load(args->at(1));
   3932   Result right = frame_->Pop();
   3933   Result left = frame_->Pop();
   3934   right.ToRegister();
   3935   left.ToRegister();
   3936   __ cmpq(right.reg(), left.reg());
   3937   right.Unuse();
   3938   left.Unuse();
   3939   destination()->Split(equal);
   3940 }
   3941 
   3942 
   3943 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
   3944   ASSERT(args->length() == 0);
   3945   // The RBP value is aligned, so it carries the tag bits of a smi; it is
   3946   // not an actual smi, though, and must not be treated as one.
   3947   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   3948   Result rbp_as_smi = allocator_->Allocate();
   3949   ASSERT(rbp_as_smi.is_valid());
   3950   __ movq(rbp_as_smi.reg(), rbp);
   3951   frame_->Push(&rbp_as_smi);
   3952 }
   3953 
   3954 
   3955 void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
   3956   ASSERT(args->length() == 0);
   3957   frame_->SpillAll();
   3958   __ push(rsi);
   3959 
   3960   // Make sure the frame is aligned like the OS expects.
   3961   static const int kFrameAlignment = OS::ActivationFrameAlignment();
   3962   if (kFrameAlignment > 0) {
   3963     ASSERT(IsPowerOf2(kFrameAlignment));
   3964     __ movq(rbx, rsp);  // Save in AMD-64 abi callee-saved register.
   3965     __ and_(rsp, Immediate(-kFrameAlignment));
   3966   }
   3967 
   3968   // Call V8::RandomPositiveSmi().
   3969   __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
   3970 
   3971   // Restore stack pointer from callee-saved register.
   3972   if (kFrameAlignment > 0) {
   3973     __ movq(rsp, rbx);
   3974   }
   3975 
   3976   __ pop(rsi);
   3977   Result result = allocator_->Allocate(rax);
   3978   frame_->Push(&result);
   3979 }
   3980 
   3981 
   3982 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
   3983   ASSERT_EQ(args->length(), 4);
   3984 
   3985   // Load the arguments on the stack and call the runtime system.
   3986   Load(args->at(0));
   3987   Load(args->at(1));
   3988   Load(args->at(2));
   3989   Load(args->at(3));
   3990   RegExpExecStub stub;
   3991   Result result = frame_->CallStub(&stub, 4);
   3992   frame_->Push(&result);
   3993 }
   3994 
   3995 
   3996 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
   3997   ASSERT_EQ(args->length(), 1);
   3998 
   3999   // Load the argument on the stack and jump to the runtime.
   4000   Load(args->at(0));
   4001 
   4002   Result answer = frame_->CallRuntime(Runtime::kNumberToString, 1);
   4003   frame_->Push(&answer);
   4004 }
   4005 
   4006 
   4007 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   4008   ASSERT_EQ(args->length(), 1);
   4009   // Load the argument on the stack and jump to the runtime.
   4010   Load(args->at(0));
   4011   Result answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
   4012   frame_->Push(&answer);
   4013 }
   4014 
   4015 
   4016 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
   4017   ASSERT_EQ(args->length(), 1);
   4018   // Load the argument on the stack and jump to the runtime.
   4019   Load(args->at(0));
   4020   Result answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
   4021   frame_->Push(&answer);
   4022 }
   4023 
   4024 
   4025 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
   4026   ASSERT_EQ(2, args->length());
   4027 
   4028   Load(args->at(0));
   4029   Load(args->at(1));
   4030 
   4031   StringAddStub stub(NO_STRING_ADD_FLAGS);
   4032   Result answer = frame_->CallStub(&stub, 2);
   4033   frame_->Push(&answer);
   4034 }
   4035 
   4036 
   4037 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
   4038   ASSERT_EQ(3, args->length());
   4039 
   4040   Load(args->at(0));
   4041   Load(args->at(1));
   4042   Load(args->at(2));
   4043 
   4044   SubStringStub stub;
   4045   Result answer = frame_->CallStub(&stub, 3);
   4046   frame_->Push(&answer);
   4047 }
   4048 
   4049 
   4050 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
   4051   ASSERT_EQ(2, args->length());
   4052 
   4053   Load(args->at(0));
   4054   Load(args->at(1));
   4055 
   4056   StringCompareStub stub;
   4057   Result answer = frame_->CallStub(&stub, 2);
   4058   frame_->Push(&answer);
   4059 }
   4060 
   4061 
   4062 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
   4063   ASSERT(args->length() == 1);
   4064   JumpTarget leave, null, function, non_function_constructor;
   4065   Load(args->at(0));  // Load the object.
   4066   Result obj = frame_->Pop();
   4067   obj.ToRegister();
   4068   frame_->Spill(obj.reg());
   4069 
   4070   // If the object is a smi, we return null.
   4071   Condition is_smi = masm_->CheckSmi(obj.reg());
   4072   null.Branch(is_smi);
   4073 
   4074   // Check that the object is a JS object but take special care of JS
   4075   // functions to make sure they have 'Function' as their class.
   4076 
   4077   __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
   4078   null.Branch(below);
   4079 
   4080   // As long as JS_FUNCTION_TYPE is the last instance type and it is
   4081   // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
   4082   // LAST_JS_OBJECT_TYPE.
   4083   ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   4084   ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
   4085   __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
   4086   function.Branch(equal);
   4087 
   4088   // Check if the constructor in the map is a function.
   4089   __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
   4090   __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
   4091   non_function_constructor.Branch(not_equal);
   4092 
   4093   // The obj register now contains the constructor function. Grab the
   4094   // instance class name from there.
   4095   __ movq(obj.reg(),
   4096           FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
   4097   __ movq(obj.reg(),
   4098           FieldOperand(obj.reg(),
   4099                        SharedFunctionInfo::kInstanceClassNameOffset));
   4100   frame_->Push(&obj);
   4101   leave.Jump();
   4102 
   4103   // Functions have class 'Function'.
   4104   function.Bind();
   4105   frame_->Push(Factory::function_class_symbol());
   4106   leave.Jump();
   4107 
   4108   // Objects with a non-function constructor have class 'Object'.
   4109   non_function_constructor.Bind();
   4110   frame_->Push(Factory::Object_symbol());
   4111   leave.Jump();
   4112 
   4113   // Non-JS objects have class null.
   4114   null.Bind();
   4115   frame_->Push(Factory::null_value());
   4116 
   4117   // All done.
   4118   leave.Bind();
   4119 }
   4120 
   4121 
   4122 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
   4123   ASSERT(args->length() == 2);
   4124   JumpTarget leave;
   4125   Load(args->at(0));  // Load the object.
   4126   Load(args->at(1));  // Load the value.
   4127   Result value = frame_->Pop();
   4128   Result object = frame_->Pop();
   4129   value.ToRegister();
   4130   object.ToRegister();
   4131 
   4132   // if (object->IsSmi()) return value.
   4133   Condition is_smi = masm_->CheckSmi(object.reg());
   4134   leave.Branch(is_smi, &value);
   4135 
   4136   // It is a heap object - get its map.
   4137   Result scratch = allocator_->Allocate();
   4138   ASSERT(scratch.is_valid());
   4139   // if (!object->IsJSValue()) return value.
   4140   __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
   4141   leave.Branch(not_equal, &value);
   4142 
   4143   // Store the value.
   4144   __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
   4145   // Update the write barrier.  Save the value as it will be
   4146   // overwritten by the write barrier code and is needed afterward.
   4147   Result duplicate_value = allocator_->Allocate();
   4148   ASSERT(duplicate_value.is_valid());
   4149   __ movq(duplicate_value.reg(), value.reg());
   4150   // The object register is also overwritten by the write barrier and
   4151   // possibly aliased in the frame.
   4152   frame_->Spill(object.reg());
   4153   __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
   4154                  scratch.reg());
   4155   object.Unuse();
   4156   scratch.Unuse();
   4157   duplicate_value.Unuse();
   4158 
   4159   // Leave.
   4160   leave.Bind(&value);
   4161   frame_->Push(&value);
   4162 }
   4163 
   4164 
   4165 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
   4166   ASSERT(args->length() == 1);
   4167   JumpTarget leave;
   4168   Load(args->at(0));  // Load the object.
   4169   frame_->Dup();
   4170   Result object = frame_->Pop();
   4171   object.ToRegister();
   4172   ASSERT(object.is_valid());
   4173   // if (object->IsSmi()) return object.
   4174   Condition is_smi = masm_->CheckSmi(object.reg());
   4175   leave.Branch(is_smi);
   4176   // It is a heap object - get map.
   4177   Result temp = allocator()->Allocate();
   4178   ASSERT(temp.is_valid());
   4179   // if (!object->IsJSValue()) return object.
   4180   __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
   4181   leave.Branch(not_equal);
   4182   __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
   4183   object.Unuse();
   4184   frame_->SetElementAt(0, &temp);
   4185   leave.Bind();
   4186 }
   4187 
   4188 
   4189 // -------------------------------------------------------------------------
   4190 // CodeGenerator implementation of Expressions
   4191 
   4192 void CodeGenerator::LoadAndSpill(Expression* expression) {
   4193   // TODO(x64): No architecture specific code. Move to shared location.
   4194   ASSERT(in_spilled_code());
   4195   set_in_spilled_code(false);
   4196   Load(expression);
   4197   frame_->SpillAll();
   4198   set_in_spilled_code(true);
   4199 }
   4200 
   4201 
   4202 void CodeGenerator::Load(Expression* expr) {
   4203 #ifdef DEBUG
   4204   int original_height = frame_->height();
   4205 #endif
   4206   ASSERT(!in_spilled_code());
   4207   JumpTarget true_target;
   4208   JumpTarget false_target;
   4209   ControlDestination dest(&true_target, &false_target, true);
   4210   LoadCondition(expr, &dest, false);
   4211 
   4212   if (dest.false_was_fall_through()) {
   4213     // The false target was just bound.
   4214     JumpTarget loaded;
   4215     frame_->Push(Factory::false_value());
   4216     // There may be dangling jumps to the true target.
   4217     if (true_target.is_linked()) {
   4218       loaded.Jump();
   4219       true_target.Bind();
   4220       frame_->Push(Factory::true_value());
   4221       loaded.Bind();
   4222     }
   4223 
   4224   } else if (dest.is_used()) {
   4225     // There is true, and possibly false, control flow (with true as
   4226     // the fall through).
   4227     JumpTarget loaded;
   4228     frame_->Push(Factory::true_value());
   4229     if (false_target.is_linked()) {
   4230       loaded.Jump();
   4231       false_target.Bind();
   4232       frame_->Push(Factory::false_value());
   4233       loaded.Bind();
   4234     }
   4235 
   4236   } else {
   4237     // We have a valid value on top of the frame, but we still may
   4238     // have dangling jumps to the true and false targets from nested
   4239     // subexpressions (e.g., the left subexpressions of the
   4240     // short-circuited boolean operators).
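            // For instance, when loading `a || b` the left subexpression may have
            // emitted branches to the true target while the right subexpression
            // left its value on the frame; the code below merges those paths.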
   4241     ASSERT(has_valid_frame());
   4242     if (true_target.is_linked() || false_target.is_linked()) {
   4243       JumpTarget loaded;
   4244       loaded.Jump();  // Don't lose the current TOS.
   4245       if (true_target.is_linked()) {
   4246         true_target.Bind();
   4247         frame_->Push(Factory::true_value());
   4248         if (false_target.is_linked()) {
   4249           loaded.Jump();
   4250         }
   4251       }
   4252       if (false_target.is_linked()) {
   4253         false_target.Bind();
   4254         frame_->Push(Factory::false_value());
   4255       }
   4256       loaded.Bind();
   4257     }
   4258   }
   4259 
   4260   ASSERT(has_valid_frame());
   4261   ASSERT(frame_->height() == original_height + 1);
   4262 }
   4263 
   4264 
   4265 // Emit code to load the value of an expression to the top of the
   4266 // frame. If the expression is boolean-valued it may be compiled (or
   4267 // partially compiled) into control flow to the control destination.
   4268 // If force_control is true, control flow is forced.
   4269 void CodeGenerator::LoadCondition(Expression* x,
   4270                                   ControlDestination* dest,
   4271                                   bool force_control) {
   4272   ASSERT(!in_spilled_code());
   4273   int original_height = frame_->height();
   4274 
   4275   { CodeGenState new_state(this, dest);
   4276     Visit(x);
   4277 
   4278     // If we hit a stack overflow, we may not have actually visited
   4279     // the expression.  In that case, we ensure that we have a
   4280     // valid-looking frame state because we will continue to generate
   4281     // code as we unwind the C++ stack.
   4282     //
   4283     // It's possible to have both a stack overflow and a valid frame
   4284     // state (e.g., a subexpression overflowed, visiting it returned
   4285     // with a dummied frame state, and visiting this expression
   4286     // returned with a normal-looking state).
   4287     if (HasStackOverflow() &&
   4288         !dest->is_used() &&
   4289         frame_->height() == original_height) {
   4290       dest->Goto(true);
   4291     }
   4292   }
   4293 
   4294   if (force_control && !dest->is_used()) {
   4295     // Convert the TOS value into flow to the control destination.
   4296     // TODO(X64): Make control flow to control destinations work.
   4297     ToBoolean(dest);
   4298   }
   4299 
   4300   ASSERT(!(force_control && !dest->is_used()));
   4301   ASSERT(dest->is_used() || frame_->height() == original_height + 1);
   4302 }
   4303 
   4304 
   4305 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
   4306 // convert it to a boolean in the condition code register or jump to
   4307 // 'false_target'/'true_target' as appropriate.
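        // The code below handles the common cases inline: 'false', 'undefined' and
        // the smi 0 branch to the false target, 'true' and non-zero smis to the true
        // target; values statically known to be numbers are compared against 0.0 in
        // the FPU; anything else falls back to the generic ToBooleanStub.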
   4308 void CodeGenerator::ToBoolean(ControlDestination* dest) {
   4309   Comment cmnt(masm_, "[ ToBoolean");
   4310 
   4311   // The value to convert should be popped from the frame.
   4312   Result value = frame_->Pop();
   4313   value.ToRegister();
   4314 
   4315   if (value.is_number()) {
   4316     Comment cmnt(masm_, "ONLY_NUMBER");
   4317     // Fast case if NumberInfo indicates only numbers.
   4318     if (FLAG_debug_code) {
   4319       __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
   4320     }
   4321     // Smi => false iff zero.
   4322     __ SmiCompare(value.reg(), Smi::FromInt(0));
   4323     dest->false_target()->Branch(equal);
   4324     Condition is_smi = masm_->CheckSmi(value.reg());
   4325     dest->true_target()->Branch(is_smi);
   4326     __ fldz();
   4327     __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
   4328     __ FCmp();
   4329     value.Unuse();
   4330     dest->Split(not_zero);
   4331   } else {
   4332     // Fast case checks.
   4333     // 'false' => false.
   4334     __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
   4335     dest->false_target()->Branch(equal);
   4336 
   4337     // 'true' => true.
   4338     __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
   4339     dest->true_target()->Branch(equal);
   4340 
   4341     // 'undefined' => false.
   4342     __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
   4343     dest->false_target()->Branch(equal);
   4344 
   4345     // Smi => false iff zero.
   4346     __ SmiCompare(value.reg(), Smi::FromInt(0));
   4347     dest->false_target()->Branch(equal);
   4348     Condition is_smi = masm_->CheckSmi(value.reg());
   4349     dest->true_target()->Branch(is_smi);
   4350 
   4351     // Call the stub for all other cases.
   4352     frame_->Push(&value);  // Undo the Pop() from above.
   4353     ToBooleanStub stub;
   4354     Result temp = frame_->CallStub(&stub, 1);
   4355     // Convert the result to a condition code.
   4356     __ testq(temp.reg(), temp.reg());
   4357     temp.Unuse();
   4358     dest->Split(not_equal);
   4359   }
   4360 }
   4361 
   4362 
   4363 void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
   4364   UNIMPLEMENTED();
   4365   // TODO(X64): Implement security policy for loads of smis.
   4366 }
   4367 
   4368 
   4369 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
   4370   return false;
   4371 }
   4372 
   4373 // -------------------------------------------------------------------------
   4374 // CodeGenerator implementation of variables, lookups, and stores.
   4375 
   4376 Reference::Reference(CodeGenerator* cgen,
   4377                      Expression* expression,
   4378                      bool  persist_after_get)
   4379     : cgen_(cgen),
   4380       expression_(expression),
   4381       type_(ILLEGAL),
   4382       persist_after_get_(persist_after_get) {
   4383   cgen->LoadReference(this);
   4384 }
   4385 
   4386 
   4387 Reference::~Reference() {
   4388   ASSERT(is_unloaded() || is_illegal());
   4389 }
   4390 
   4391 
   4392 void CodeGenerator::LoadReference(Reference* ref) {
   4393   // References are loaded from both spilled and unspilled code.  Set the
   4394   // state to unspilled to allow that (and explicitly spill after
   4395   // construction at the construction sites).
   4396   bool was_in_spilled_code = in_spilled_code_;
   4397   in_spilled_code_ = false;
   4398 
   4399   Comment cmnt(masm_, "[ LoadReference");
   4400   Expression* e = ref->expression();
   4401   Property* property = e->AsProperty();
   4402   Variable* var = e->AsVariableProxy()->AsVariable();
   4403 
   4404   if (property != NULL) {
   4405     // The expression is either a property or a variable proxy that rewrites
   4406     // to a property.
   4407     Load(property->obj());
   4408     if (property->key()->IsPropertyName()) {
   4409       ref->set_type(Reference::NAMED);
   4410     } else {
   4411       Load(property->key());
   4412       ref->set_type(Reference::KEYED);
   4413     }
   4414   } else if (var != NULL) {
   4415     // The expression is a variable proxy that does not rewrite to a
   4416     // property.  Global variables are treated as named property references.
   4417     if (var->is_global()) {
   4418       LoadGlobal();
   4419       ref->set_type(Reference::NAMED);
   4420     } else {
   4421       ASSERT(var->slot() != NULL);
   4422       ref->set_type(Reference::SLOT);
   4423     }
   4424   } else {
   4425     // Anything else is a runtime error.
   4426     Load(e);
   4427     frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
   4428   }
   4429 
   4430   in_spilled_code_ = was_in_spilled_code;
   4431 }
   4432 
   4433 
   4434 void CodeGenerator::UnloadReference(Reference* ref) {
   4435   // Pop a reference from the stack while preserving TOS.
   4436   Comment cmnt(masm_, "[ UnloadReference");
   4437   frame_->Nip(ref->size());
   4438   ref->set_unloaded();
   4439 }
   4440 
   4441 
   4442 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
   4443   // Currently, this assertion will fail if we try to assign to
   4444   // a constant variable that is constant because it is read-only
   4445   // (such as the variable referring to a named function expression).
   4446   // We need to implement assignments to read-only variables.
   4447   // Ideally, we should do this during AST generation (by converting
   4448   // such assignments into expression statements); however, in general
   4449   // we may not be able to make the decision until past AST generation,
   4450   // that is when the entire program is known.
   4451   ASSERT(slot != NULL);
   4452   int index = slot->index();
   4453   switch (slot->type()) {
   4454     case Slot::PARAMETER:
   4455       return frame_->ParameterAt(index);
   4456 
   4457     case Slot::LOCAL:
   4458       return frame_->LocalAt(index);
   4459 
   4460     case Slot::CONTEXT: {
   4461       // Follow the context chain if necessary.
   4462       ASSERT(!tmp.is(rsi));  // do not overwrite context register
   4463       Register context = rsi;
   4464       int chain_length = scope()->ContextChainLength(slot->var()->scope());
   4465       for (int i = 0; i < chain_length; i++) {
   4466         // Load the closure.
   4467         // (All contexts, even 'with' contexts, have a closure,
   4468         // and it is the same for all contexts inside a function.
   4469         // There is no need to go to the function context first.)
   4470         __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
   4471         // Load the function context (which is the incoming, outer context).
   4472         __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
   4473         context = tmp;
   4474       }
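              // Example: for a slot two function scopes out, chain_length is 2 and
              // the loop above hops closure->context twice before the slot is read
              // from the (function) context reached below.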
   4475       // We may have a 'with' context now. Get the function context.
   4476       // (In fact this mov may never be needed, since the scope analysis
   4477       // may not permit a direct context access in this case and thus we
   4478       // are always at a function context already. However, it is safe to
   4479       // dereference, because the function context of a function context
   4480       // is itself. Before deleting this mov we should try to construct a
   4481       // counter-example first, though...)
   4482       __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
   4483       return ContextOperand(tmp, index);
   4484     }
   4485 
   4486     default:
   4487       UNREACHABLE();
   4488       return Operand(rsp, 0);
   4489   }
   4490 }
   4491 
   4492 
   4493 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
   4494                                                          Result tmp,
   4495                                                          JumpTarget* slow) {
   4496   ASSERT(slot->type() == Slot::CONTEXT);
   4497   ASSERT(tmp.is_register());
   4498   Register context = rsi;
   4499 
   4500   for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
   4501     if (s->num_heap_slots() > 0) {
   4502       if (s->calls_eval()) {
   4503         // Check that extension is NULL.
   4504         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
   4505                 Immediate(0));
   4506         slow->Branch(not_equal, not_taken);
   4507       }
   4508       __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
   4509       __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
   4510       context = tmp.reg();
   4511     }
   4512   }
   4513   // Check that last extension is NULL.
   4514   __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
   4515   slow->Branch(not_equal, not_taken);
   4516   __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
   4517   return ContextOperand(tmp.reg(), slot->index());
   4518 }
   4519 
   4520 
   4521 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
   4522   if (slot->type() == Slot::LOOKUP) {
   4523     ASSERT(slot->var()->is_dynamic());
   4524 
   4525     JumpTarget slow;
   4526     JumpTarget done;
   4527     Result value;
   4528 
   4529     // Generate fast-case code for variables that might be shadowed by
   4530     // eval-introduced variables.  Eval is used a lot without
   4531     // introducing variables.  In those cases, we do not want to
   4532     // perform a runtime call for all variables in the scope
   4533     // containing the eval.
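            // Illustrative case (assumed typical): in `function f(x) { eval("1");
            // return x; }` the eval introduces no bindings, so `x` can normally be
            // read on the fast path and the runtime call is only taken when an
            // extension object is actually present.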
   4534     if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
   4535       value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
   4536       // If there was no control flow to slow, we can exit early.
   4537       if (!slow.is_linked()) {
   4538         frame_->Push(&value);
   4539         return;
   4540       }
   4541 
   4542       done.Jump(&value);
   4543 
   4544     } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
   4545       Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
   4546       // Only generate the fast case for locals that rewrite to slots.
   4547       // This rules out argument loads.
   4548       if (potential_slot != NULL) {
   4549         // Allocate a fresh register to use as a temp in
   4550         // ContextSlotOperandCheckExtensions and to hold the result
   4551         // value.
   4552         value = allocator_->Allocate();
   4553         ASSERT(value.is_valid());
   4554         __ movq(value.reg(),
   4555                ContextSlotOperandCheckExtensions(potential_slot,
   4556                                                  value,
   4557                                                  &slow));
   4558         if (potential_slot->var()->mode() == Variable::CONST) {
   4559           __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
   4560           done.Branch(not_equal, &value);
   4561           __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
   4562         }
   4563         // There is always control flow to slow from
   4564         // ContextSlotOperandCheckExtensions so we have to jump around
   4565         // it.
   4566         done.Jump(&value);
   4567       }
   4568     }
   4569 
   4570     slow.Bind();
   4571     // A runtime call is inevitable.  We eagerly sync frame elements
   4572     // to memory so that we can push the arguments directly into place
   4573     // on top of the frame.
   4574     frame_->SyncRange(0, frame_->element_count() - 1);
   4575     frame_->EmitPush(rsi);
   4576     __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
   4577     frame_->EmitPush(kScratchRegister);
   4578     if (typeof_state == INSIDE_TYPEOF) {
   4579        value =
   4580          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
   4581     } else {
   4582        value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
   4583     }
   4584 
   4585     done.Bind(&value);
   4586     frame_->Push(&value);
   4587 
   4588   } else if (slot->var()->mode() == Variable::CONST) {
   4589     // Const slots may contain 'the hole' value (the constant hasn't been
   4590     // initialized yet) which needs to be converted into the 'undefined'
   4591     // value.
   4592     //
   4593     // We currently spill the virtual frame because constants use the
   4594     // potentially unsafe direct-frame access of SlotOperand.
   4595     VirtualFrame::SpilledScope spilled_scope;
   4596     Comment cmnt(masm_, "[ Load const");
   4597     JumpTarget exit;
   4598     __ movq(rcx, SlotOperand(slot, rcx));
   4599     __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
   4600     exit.Branch(not_equal);
   4601     __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
   4602     exit.Bind();
   4603     frame_->EmitPush(rcx);
   4604 
   4605   } else if (slot->type() == Slot::PARAMETER) {
   4606     frame_->PushParameterAt(slot->index());
   4607 
   4608   } else if (slot->type() == Slot::LOCAL) {
   4609     frame_->PushLocalAt(slot->index());
   4610 
   4611   } else {
   4612     // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
   4613     // here.
   4614     //
   4615     // The use of SlotOperand below is safe for an unspilled frame
   4616     // because it will always be a context slot.
   4617     ASSERT(slot->type() == Slot::CONTEXT);
   4618     Result temp = allocator_->Allocate();
   4619     ASSERT(temp.is_valid());
   4620     __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
   4621     frame_->Push(&temp);
   4622   }
   4623 }
   4624 
   4625 
   4626 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
   4627                                                   TypeofState state) {
   4628   LoadFromSlot(slot, state);
   4629 
   4630   // Bail out quickly if we're not using lazy arguments allocation.
   4631   if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
   4632 
   4633   // ... or if the slot isn't a non-parameter arguments slot.
   4634   if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
   4635 
   4636   // Pop the loaded value from the stack.
   4637   Result value = frame_->Pop();
   4638 
   4639   // If the loaded value is a constant, we know statically whether the
   4640   // arguments object has already been lazily allocated.
   4641   if (value.is_constant()) {
   4642     if (value.handle()->IsTheHole()) {
   4643       Result arguments = StoreArgumentsObject(false);
   4644       frame_->Push(&arguments);
   4645     } else {
   4646       frame_->Push(&value);
   4647     }
   4648     return;
   4649   }
   4650 
   4651   // The loaded value is in a register. If it is the sentinel that
   4652   // indicates that we haven't loaded the arguments object yet, we
   4653   // need to do it now.
   4654   JumpTarget exit;
   4655   __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
   4656   frame_->Push(&value);
   4657   exit.Branch(not_equal);
   4658   Result arguments = StoreArgumentsObject(false);
   4659   frame_->SetElementAt(0, &arguments);
   4660   exit.Bind();
   4661 }
   4662 
   4663 
   4664 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
   4665   if (slot->type() == Slot::LOOKUP) {
   4666     ASSERT(slot->var()->is_dynamic());
   4667 
   4668     // For now, just do a runtime call.  Since the call is inevitable,
   4669     // we eagerly sync the virtual frame so we can directly push the
   4670     // arguments into place.
   4671     frame_->SyncRange(0, frame_->element_count() - 1);
   4672 
   4673     frame_->EmitPush(rsi);
   4674     frame_->EmitPush(slot->var()->name());
   4675 
   4676     Result value;
   4677     if (init_state == CONST_INIT) {
   4678       // Same as the case for a normal store, but it ignores the attribute
   4679       // (e.g. READ_ONLY) of the context slot so that we can initialize const
   4680       // properties (introduced via eval("const foo = (some expr);")). It also
   4681       // uses the current function context instead of the top context.
   4682       //
   4683       // Note that we must declare foo upon entry of eval(), via a
   4684       // context slot declaration, but we cannot initialize it at the same
   4685       // time, because the const declaration may be at the end of the eval
   4686       // code (sigh...) and the const variable may have been used before
   4687       // (where its value is 'undefined'). Thus, we can only do the
   4688       // initialization when we actually encounter the expression and when
   4689       // the expression operands are defined and valid, and thus we need the
   4690       // split into 2 operations: declaration of the context slot followed
   4691       // by initialization.
   4692       value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
   4693     } else {
   4694       value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
   4695     }
   4696     // Storing a variable must keep the (new) value on the expression
   4697     // stack. This is necessary for compiling chained assignment
   4698     // expressions.
   4699     frame_->Push(&value);
   4700   } else {
   4701     ASSERT(!slot->var()->is_dynamic());
   4702 
   4703     JumpTarget exit;
   4704     if (init_state == CONST_INIT) {
   4705       ASSERT(slot->var()->mode() == Variable::CONST);
   4706       // Only the first const initialization must be executed (the slot
   4707       // still contains 'the hole' value). When the assignment is executed,
   4708       // the code is identical to a normal store (see below).
   4709       //
   4710       // We spill the frame in the code below because the direct-frame
   4711       // access of SlotOperand is potentially unsafe with an unspilled
   4712       // frame.
   4713       VirtualFrame::SpilledScope spilled_scope;
   4714       Comment cmnt(masm_, "[ Init const");
   4715       __ movq(rcx, SlotOperand(slot, rcx));
   4716       __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
   4717       exit.Branch(not_equal);
   4718     }
   4719 
   4720     // We must execute the store.  Storing a variable must keep the (new)
   4721     // value on the stack. This is necessary for compiling assignment
   4722     // expressions.
   4723     //
   4724     // Note: We will reach here even with slot->var()->mode() ==
   4725     // Variable::CONST because of const declarations which will initialize
   4726     // consts to 'the hole' value and by doing so, end up calling this code.
   4727     if (slot->type() == Slot::PARAMETER) {
   4728       frame_->StoreToParameterAt(slot->index());
   4729     } else if (slot->type() == Slot::LOCAL) {
   4730       frame_->StoreToLocalAt(slot->index());
   4731     } else {
   4732       // The other slot types (LOOKUP and GLOBAL) cannot reach here.
   4733       //
   4734       // The use of SlotOperand below is safe for an unspilled frame
   4735       // because the slot is a context slot.
   4736       ASSERT(slot->type() == Slot::CONTEXT);
   4737       frame_->Dup();
   4738       Result value = frame_->Pop();
   4739       value.ToRegister();
   4740       Result start = allocator_->Allocate();
   4741       ASSERT(start.is_valid());
   4742       __ movq(SlotOperand(slot, start.reg()), value.reg());
   4743       // RecordWrite may destroy the value registers.
   4744       //
   4745       // TODO(204): Avoid actually spilling when the value is not
   4746       // needed (probably the common case).
   4747       frame_->Spill(value.reg());
   4748       int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
   4749       Result temp = allocator_->Allocate();
   4750       ASSERT(temp.is_valid());
   4751       __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
   4752       // The results start, value, and temp are unused by going out of
   4753       // scope.
   4754     }
   4755 
   4756     exit.Bind();
   4757   }
   4758 }
   4759 
   4760 
   4761 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
   4762     Slot* slot,
   4763     TypeofState typeof_state,
   4764     JumpTarget* slow) {
   4765   // Check that no extension objects have been created by calls to
   4766   // eval from the current scope to the global scope.
   4767   Register context = rsi;
   4768   Result tmp = allocator_->Allocate();
   4769   ASSERT(tmp.is_valid());  // All non-reserved registers were available.
   4770 
   4771   Scope* s = scope();
   4772   while (s != NULL) {
   4773     if (s->num_heap_slots() > 0) {
   4774       if (s->calls_eval()) {
   4775         // Check that extension is NULL.
   4776         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
   4777                Immediate(0));
   4778         slow->Branch(not_equal, not_taken);
   4779       }
   4780       // Load next context in chain.
   4781       __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
   4782       __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
   4783       context = tmp.reg();
   4784     }
   4785     // If no outer scope calls eval, we do not need to check more
   4786     // context extensions.  If we have reached an eval scope, we check
   4787     // all extensions from this point.
   4788     if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
   4789     s = s->outer_scope();
   4790   }
   4791 
   4792   if (s->is_eval_scope()) {
   4793     // Loop up the context chain.  There is no frame effect so it is
   4794     // safe to use raw labels here.
   4795     Label next, fast;
   4796     if (!context.is(tmp.reg())) {
   4797       __ movq(tmp.reg(), context);
   4798     }
   4799     // Load map for comparison into register, outside loop.
   4800     __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
   4801     __ bind(&next);
   4802     // Terminate at global context.
   4803     __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
   4804     __ j(equal, &fast);
   4805     // Check that extension is NULL.
   4806     __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
   4807     slow->Branch(not_equal);
   4808     // Load next context in chain.
   4809     __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
   4810     __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
   4811     __ jmp(&next);
   4812     __ bind(&fast);
   4813   }
   4814   tmp.Unuse();
   4815 
   4816   // All extension objects were empty and it is safe to use a global
   4817   // load IC call.
   4818   LoadGlobal();
   4819   frame_->Push(slot->var()->name());
   4820   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
   4821                          ? RelocInfo::CODE_TARGET
   4822                          : RelocInfo::CODE_TARGET_CONTEXT;
   4823   Result answer = frame_->CallLoadIC(mode);
   4824   // A test rax instruction following the call signals that the inobject
   4825   // property case was inlined.  Ensure that there is not a test rax
   4826   // instruction here.
   4827   masm_->nop();
   4828   // Discard the global object. The result is in answer.
   4829   frame_->Drop();
   4830   return answer;
   4831 }
   4832 
   4833 
   4834 void CodeGenerator::LoadGlobal() {
   4835   if (in_spilled_code()) {
   4836     frame_->EmitPush(GlobalObject());
   4837   } else {
   4838     Result temp = allocator_->Allocate();
   4839     __ movq(temp.reg(), GlobalObject());
   4840     frame_->Push(&temp);
   4841   }
   4842 }
   4843 
   4844 
   4845 void CodeGenerator::LoadGlobalReceiver() {
   4846   Result temp = allocator_->Allocate();
   4847   Register reg = temp.reg();
   4848   __ movq(reg, GlobalObject());
   4849   __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
   4850   frame_->Push(&temp);
   4851 }
   4852 
   4853 
   4854 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
   4855   if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
   4856   ASSERT(scope()->arguments_shadow() != NULL);
   4857   // We don't want to do lazy arguments allocation for functions that
   4858   // have heap-allocated contexts, because it interferes with the
   4859   // uninitialized const tracking in the context objects.
   4860   return (scope()->num_heap_slots() > 0)
   4861       ? EAGER_ARGUMENTS_ALLOCATION
   4862       : LAZY_ARGUMENTS_ALLOCATION;
   4863 }
   4864 
   4865 
   4866 Result CodeGenerator::StoreArgumentsObject(bool initial) {
   4867   ArgumentsAllocationMode mode = ArgumentsMode();
   4868   ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
   4869 
   4870   Comment cmnt(masm_, "[ store arguments object");
   4871   if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
   4872     // When using lazy arguments allocation, we store the hole value
   4873     // as a sentinel indicating that the arguments object hasn't been
   4874     // allocated yet.
   4875     frame_->Push(Factory::the_hole_value());
   4876   } else {
   4877     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
   4878     frame_->PushFunction();
   4879     frame_->PushReceiverSlotAddress();
   4880     frame_->Push(Smi::FromInt(scope()->num_parameters()));
   4881     Result result = frame_->CallStub(&stub, 3);
   4882     frame_->Push(&result);
   4883   }
   4884 
   4885 
   4886   Variable* arguments = scope()->arguments()->var();
   4887   Variable* shadow = scope()->arguments_shadow()->var();
   4888   ASSERT(arguments != NULL && arguments->slot() != NULL);
   4889   ASSERT(shadow != NULL && shadow->slot() != NULL);
   4890   JumpTarget done;
   4891   bool skip_arguments = false;
   4892   if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
   4893     // We have to skip storing into the arguments slot if it has
   4894     // already been written to. This can happen if the function
   4895     // has a local variable named 'arguments'.
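            // For example, in `function f() { var arguments = 42; ... }` the slot no
            // longer holds the hole sentinel, so the freshly allocated object must
            // not overwrite the user's value.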
   4896     LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
   4897     Result probe = frame_->Pop();
   4898     if (probe.is_constant()) {
   4899       // We have to skip updating the arguments object if it has been
   4900       // assigned a proper value.
   4901       skip_arguments = !probe.handle()->IsTheHole();
   4902     } else {
   4903       __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
   4904       probe.Unuse();
   4905       done.Branch(not_equal);
   4906     }
   4907   }
   4908   if (!skip_arguments) {
   4909     StoreToSlot(arguments->slot(), NOT_CONST_INIT);
   4910     if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
   4911   }
   4912   StoreToSlot(shadow->slot(), NOT_CONST_INIT);
   4913   return frame_->Pop();
   4914 }
   4915 
   4916 
   4917 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
   4918   // Special handling of identifiers as subexpressions of typeof.
   4919   Variable* variable = expr->AsVariableProxy()->AsVariable();
   4920   if (variable != NULL && !variable->is_this() && variable->is_global()) {
   4921     // For a global variable we build the property reference
   4922     // <global>.<variable> and perform a (regular non-contextual) property
   4923     // load to make sure we do not get reference errors.
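            // e.g. `typeof some_undeclared_global` must yield "undefined", so the
            // global is read as an ordinary property (missing properties just load
            // undefined) rather than through a contextual lookup.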
   4924     Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
   4925     Literal key(variable->name());
   4926     Property property(&global, &key, RelocInfo::kNoPosition);
   4927     Reference ref(this, &property);
   4928     ref.GetValue();
   4929   } else if (variable != NULL && variable->slot() != NULL) {
   4930     // For a variable that rewrites to a slot, we signal it is the immediate
   4931     // subexpression of a typeof.
   4932     LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
   4933   } else {
   4934     // Anything else can be handled normally.
   4935     Load(expr);
   4936   }
   4937 }
   4938 
   4939 
   4940 void CodeGenerator::Comparison(AstNode* node,
   4941                                Condition cc,
   4942                                bool strict,
   4943                                ControlDestination* dest) {
   4944   // Strict only makes sense for equality comparisons.
   4945   ASSERT(!strict || cc == equal);
   4946 
   4947   Result left_side;
   4948   Result right_side;
   4949   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
   4950   if (cc == greater || cc == less_equal) {
   4951     cc = ReverseCondition(cc);
   4952     left_side = frame_->Pop();
   4953     right_side = frame_->Pop();
   4954   } else {
   4955     right_side = frame_->Pop();
   4956     left_side = frame_->Pop();
   4957   }
   4958   ASSERT(cc == less || cc == equal || cc == greater_equal);
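          // For example, `a > b` arrives here as `b < a`: the pops above swapped the
          // operands and ReverseCondition turned greater into less, so only the three
          // conditions asserted above need inlined handling.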
   4959 
   4960   // If either side is a constant smi, optimize the comparison.
   4961   bool left_side_constant_smi =
   4962       left_side.is_constant() && left_side.handle()->IsSmi();
   4963   bool right_side_constant_smi =
   4964       right_side.is_constant() && right_side.handle()->IsSmi();
   4965   bool left_side_constant_null =
   4966       left_side.is_constant() && left_side.handle()->IsNull();
   4967   bool right_side_constant_null =
   4968       right_side.is_constant() && right_side.handle()->IsNull();
   4969 
   4970   if (left_side_constant_smi || right_side_constant_smi) {
   4971     if (left_side_constant_smi && right_side_constant_smi) {
   4972       // Trivial case, comparing two constants.
   4973       int left_value = Smi::cast(*left_side.handle())->value();
   4974       int right_value = Smi::cast(*right_side.handle())->value();
   4975       switch (cc) {
   4976         case less:
   4977           dest->Goto(left_value < right_value);
   4978           break;
   4979         case equal:
   4980           dest->Goto(left_value == right_value);
   4981           break;
   4982         case greater_equal:
   4983           dest->Goto(left_value >= right_value);
   4984           break;
   4985         default:
   4986           UNREACHABLE();
   4987       }
   4988     } else {
   4989       // Only one side is a constant Smi.
   4990       // If left side is a constant Smi, reverse the operands.
   4991       // Since one side is a constant Smi, conversion order does not matter.
   4992       if (left_side_constant_smi) {
   4993         Result temp = left_side;
   4994         left_side = right_side;
   4995         right_side = temp;
   4996         cc = ReverseCondition(cc);
   4997         // This may reintroduce greater or less_equal as the value of cc.
   4998         // CompareStub and the inline code both support all values of cc.
   4999       }
   5000       // Implement comparison against a constant Smi, inlining the case
   5001       // where both sides are Smis.
   5002       left_side.ToRegister();
   5003       Register left_reg = left_side.reg();
   5004       Handle<Object> right_val = right_side.handle();
   5005 
   5006       // Here we split control flow to the stub call and inlined cases
   5007       // before finally splitting it to the control destination.  We use
   5008       // a jump target and branching to duplicate the virtual frame at
   5009       // the first split.  We manually handle the off-frame references
   5010       // by reconstituting them on the non-fall-through path.
   5011       JumpTarget is_smi;
   5012 
   5013       Condition left_is_smi = masm_->CheckSmi(left_side.reg());
   5014       is_smi.Branch(left_is_smi);
   5015 
   5016       bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
   5017           && node->AsCompareOperation()->is_for_loop_condition();
   5018       if (!is_for_loop_compare && right_val->IsSmi()) {
   5019         // Right side is a constant smi and left side has been checked
   5020         // not to be a smi.
   5021         JumpTarget not_number;
   5022         __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
   5023                Factory::heap_number_map());
   5024         not_number.Branch(not_equal, &left_side);
   5025         __ movsd(xmm1,
   5026                  FieldOperand(left_reg, HeapNumber::kValueOffset));
   5027         int value = Smi::cast(*right_val)->value();
   5028         if (value == 0) {
   5029           __ xorpd(xmm0, xmm0);
   5030         } else {
   5031           Result temp = allocator()->Allocate();
   5032           __ movl(temp.reg(), Immediate(value));
   5033           __ cvtlsi2sd(xmm0, temp.reg());
   5034           temp.Unuse();
   5035         }
   5036         __ ucomisd(xmm1, xmm0);
   5037         // Jump to builtin for NaN.
   5038         not_number.Branch(parity_even, &left_side);
   5039         left_side.Unuse();
   5040         Condition double_cc = cc;
   5041         switch (cc) {
   5042           case less:          double_cc = below;       break;
   5043           case equal:         double_cc = equal;       break;
   5044           case less_equal:    double_cc = below_equal; break;
   5045           case greater:       double_cc = above;       break;
   5046           case greater_equal: double_cc = above_equal; break;
   5047           default: UNREACHABLE();
   5048         }
   5049         dest->true_target()->Branch(double_cc);
   5050         dest->false_target()->Jump();
   5051         not_number.Bind(&left_side);
   5052       }
   5053 
   5054       // Set up and call the compare stub.
   5055       CompareStub stub(cc, strict);
   5056       Result result = frame_->CallStub(&stub, &left_side, &right_side);
   5057       result.ToRegister();
   5058       __ testq(result.reg(), result.reg());
   5059       result.Unuse();
   5060       dest->true_target()->Branch(cc);
   5061       dest->false_target()->Jump();
   5062 
   5063       is_smi.Bind();
   5064       left_side = Result(left_reg);
   5065       right_side = Result(right_val);
   5066       // Test smi equality and comparison by signed int comparison.
   5067       // Both sides are smis, so we can compare directly against the constant.
   5068       __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
   5069       left_side.Unuse();
   5070       right_side.Unuse();
   5071       dest->Split(cc);
   5072     }
   5073   } else if (cc == equal &&
   5074              (left_side_constant_null || right_side_constant_null)) {
   5075     // To make null checks efficient, we check if either the left side or
   5076     // the right side is the constant 'null'.
   5077     // If so, we optimize the code by inlining a null check instead of
   5078     // calling the (very) general runtime routine for checking equality.
   5079     Result operand = left_side_constant_null ? right_side : left_side;
   5080     right_side.Unuse();
   5081     left_side.Unuse();
   5082     operand.ToRegister();
   5083     __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
   5084     if (strict) {
   5085       operand.Unuse();
   5086       dest->Split(equal);
   5087     } else {
   5088       // The 'null' value is only equal to 'undefined' if using non-strict
   5089       // comparisons.
   5090       dest->true_target()->Branch(equal);
   5091       __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
   5092       dest->true_target()->Branch(equal);
   5093       Condition is_smi = masm_->CheckSmi(operand.reg());
   5094       dest->false_target()->Branch(is_smi);
   5095 
   5096       // It can be an undetectable object.
   5097       // Use a scratch register in preference to spilling operand.reg().
   5098       Result temp = allocator()->Allocate();
   5099       ASSERT(temp.is_valid());
   5100       __ movq(temp.reg(),
   5101               FieldOperand(operand.reg(), HeapObject::kMapOffset));
   5102       __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
   5103                Immediate(1 << Map::kIsUndetectable));
   5104       temp.Unuse();
   5105       operand.Unuse();
   5106       dest->Split(not_zero);
   5107     }
   5108   } else {  // Neither side is a constant Smi or null.
   5109     // If either side is a non-smi constant, skip the smi check.
   5110     bool known_non_smi =
   5111         (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
   5112         (right_side.is_constant() && !right_side.handle()->IsSmi());
   5113     left_side.ToRegister();
   5114     right_side.ToRegister();
   5115 
   5116     if (known_non_smi) {
   5117       // When non-smi, call out to the compare stub.
   5118       CompareStub stub(cc, strict);
   5119       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
   5120       // The result is a Smi, which is negative, zero, or positive.
   5121       __ SmiTest(answer.reg());  // Sets both zero and sign flag.
   5122       answer.Unuse();
   5123       dest->Split(cc);
   5124     } else {
   5125       // Here we split control flow to the stub call and inlined cases
   5126       // before finally splitting it to the control destination.  We use
   5127       // a jump target and branching to duplicate the virtual frame at
   5128       // the first split.  We manually handle the off-frame references
   5129       // by reconstituting them on the non-fall-through path.
   5130       JumpTarget is_smi;
   5131       Register left_reg = left_side.reg();
   5132       Register right_reg = right_side.reg();
   5133 
   5134       Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
   5135       is_smi.Branch(both_smi);
   5136       // When non-smi, call out to the compare stub.
   5137       CompareStub stub(cc, strict);
   5138       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
   5139       __ SmiTest(answer.reg());  // Sets both zero and sign flags.
   5140       answer.Unuse();
   5141       dest->true_target()->Branch(cc);
   5142       dest->false_target()->Jump();
   5143 
   5144       is_smi.Bind();
   5145       left_side = Result(left_reg);
   5146       right_side = Result(right_reg);
   5147       __ SmiCompare(left_side.reg(), right_side.reg());
   5148       right_side.Unuse();
   5149       left_side.Unuse();
   5150       dest->Split(cc);
   5151     }
   5152   }
   5153 }
   5154 
   5155 
   5156 class DeferredInlineBinaryOperation: public DeferredCode {
   5157  public:
   5158   DeferredInlineBinaryOperation(Token::Value op,
   5159                                 Register dst,
   5160                                 Register left,
   5161                                 Register right,
   5162                                 OverwriteMode mode)
   5163       : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
   5164     set_comment("[ DeferredInlineBinaryOperation");
   5165   }
   5166 
   5167   virtual void Generate();
   5168 
   5169  private:
   5170   Token::Value op_;
   5171   Register dst_;
   5172   Register left_;
   5173   Register right_;
   5174   OverwriteMode mode_;
   5175 };
   5176 
   5177 
   5178 void DeferredInlineBinaryOperation::Generate() {
   5179   GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
   5180   stub.GenerateCall(masm_, left_, right_);
   5181   if (!dst_.is(rax)) __ movq(dst_, rax);
   5182 }
   5183 
   5184 
   5185 void CodeGenerator::GenericBinaryOperation(Token::Value op,
   5186                                            StaticType* type,
   5187                                            OverwriteMode overwrite_mode) {
   5188   Comment cmnt(masm_, "[ BinaryOperation");
   5189   Comment cmnt_token(masm_, Token::String(op));
   5190 
   5191   if (op == Token::COMMA) {
   5192     // Simply discard left value.
   5193     frame_->Nip(1);
   5194     return;
   5195   }
   5196 
   5197   Result right = frame_->Pop();
   5198   Result left = frame_->Pop();
   5199 
   5200   if (op == Token::ADD) {
   5201     bool left_is_string = left.is_constant() && left.handle()->IsString();
   5202     bool right_is_string = right.is_constant() && right.handle()->IsString();
   5203     if (left_is_string || right_is_string) {
   5204       frame_->Push(&left);
   5205       frame_->Push(&right);
   5206       Result answer;
   5207       if (left_is_string) {
   5208         if (right_is_string) {
   5209           // TODO(lrn): if both are constant strings
   5210           // -- do a compile time cons, if allocation during codegen is allowed.
   5211           answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
   5212         } else {
   5213           answer =
   5214             frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
   5215         }
   5216       } else if (right_is_string) {
   5217         answer =
   5218           frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
   5219       }
   5220       frame_->Push(&answer);
   5221       return;
   5222     }
   5223     // Neither operand is known to be a string.
   5224   }
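          // For instance, `"x=" + v` has a constant string on the left, so the
          // STRING_ADD_LEFT builtin above is used directly instead of the generic
          // binary op stub chosen further down.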
   5225 
   5226   bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
   5227   bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
   5228   bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
   5229   bool right_is_non_smi_constant =
   5230       right.is_constant() && !right.handle()->IsSmi();
   5231 
   5232   if (left_is_smi_constant && right_is_smi_constant) {
   5233     // Compute the constant result at compile time, and leave it on the frame.
   5234     int left_int = Smi::cast(*left.handle())->value();
   5235     int right_int = Smi::cast(*right.handle())->value();
   5236     if (FoldConstantSmis(op, left_int, right_int)) return;
   5237   }
   5238 
   5239   // Get number type of left and right sub-expressions.
   5240   NumberInfo::Type operands_type =
   5241       NumberInfo::Combine(left.number_info(), right.number_info());
   5242 
   5243   Result answer;
   5244   if (left_is_non_smi_constant || right_is_non_smi_constant) {
   5245     GenericBinaryOpStub stub(op,
   5246                              overwrite_mode,
   5247                              NO_SMI_CODE_IN_STUB,
   5248                              operands_type);
   5249     answer = stub.GenerateCall(masm_, frame_, &left, &right);
   5250   } else if (right_is_smi_constant) {
   5251     answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
   5252                                         type, false, overwrite_mode);
   5253   } else if (left_is_smi_constant) {
   5254     answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
   5255                                         type, true, overwrite_mode);
   5256   } else {
   5257     // Set the flags based on the operation, type and loop nesting level.
   5258     // Bit operations always assume they likely operate on Smis. Still only
   5259     // generate the inline Smi check code if this operation is part of a loop.
   5260     // For all other operations only inline the Smi check code for likely smis
   5261     // if the operation is part of a loop.
   5262     if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
   5263       answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
   5264     } else {
   5265       GenericBinaryOpStub stub(op,
   5266                                overwrite_mode,
   5267                                NO_GENERIC_BINARY_FLAGS,
   5268                                operands_type);
   5269       answer = stub.GenerateCall(masm_, frame_, &left, &right);
   5270     }
   5271   }
   5272 
   5273   // Set NumberInfo of result according to the operation performed.
   5274   // We rely on the fact that smis have a 32 bit payload on x64.
   5275   ASSERT(kSmiValueSize == 32);
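          // e.g. the bitwise ops below produce signed 32-bit integers, which always
          // fit in the 32-bit smi payload asserted above; only SHR needs a shift
          // count of at least 1 to guarantee the (unsigned) result still fits.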
   5276   NumberInfo::Type result_type = NumberInfo::kUnknown;
   5277   switch (op) {
   5278     case Token::COMMA:
   5279       result_type = right.number_info();
   5280       break;
   5281     case Token::OR:
   5282     case Token::AND:
   5283       // Result type can be either of the two input types.
   5284       result_type = operands_type;
   5285       break;
   5286     case Token::BIT_OR:
   5287     case Token::BIT_XOR:
   5288     case Token::BIT_AND:
   5289       // Result is always a smi.
   5290       result_type = NumberInfo::kSmi;
   5291       break;
   5292     case Token::SAR:
   5293     case Token::SHL:
   5294       // Result is always a smi.
   5295       result_type = NumberInfo::kSmi;
   5296       break;
   5297     case Token::SHR:
   5298       // Result of x >>> y is always a smi if y >= 1, otherwise a number.
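               // For example, -1 >>> 1 == 0x7fffffff still fits in the 32-bit smi
               // payload asserted above, while -1 >>> 0 == 0xffffffff does not and
               // therefore has to be classified as a (heap) number.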
   5299       result_type = (right.is_constant() && right.handle()->IsSmi()
   5300                      && Smi::cast(*right.handle())->value() >= 1)
   5301           ? NumberInfo::kSmi
   5302           : NumberInfo::kNumber;
   5303       break;
   5304     case Token::ADD:
   5305       // Result could be a string or a number. Check types of inputs.
   5306       result_type = NumberInfo::IsNumber(operands_type)
   5307           ? NumberInfo::kNumber
   5308           : NumberInfo::kUnknown;
   5309       break;
   5310     case Token::SUB:
   5311     case Token::MUL:
   5312     case Token::DIV:
   5313     case Token::MOD:
   5314       // Result is always a number.
   5315       result_type = NumberInfo::kNumber;
   5316       break;
   5317     default:
   5318       UNREACHABLE();
   5319   }
   5320   answer.set_number_info(result_type);
   5321   frame_->Push(&answer);
   5322 }
   5323 
   5324 
   5325 // Emit a LoadIC call to get the value from receiver and leave it in
   5326 // dst.  The receiver register is restored after the call.
   5327 class DeferredReferenceGetNamedValue: public DeferredCode {
   5328  public:
   5329   DeferredReferenceGetNamedValue(Register dst,
   5330                                  Register receiver,
   5331                                  Handle<String> name)
    5332       : dst_(dst), receiver_(receiver), name_(name) {
   5333     set_comment("[ DeferredReferenceGetNamedValue");
   5334   }
   5335 
   5336   virtual void Generate();
   5337 
   5338   Label* patch_site() { return &patch_site_; }
   5339 
   5340  private:
   5341   Label patch_site_;
   5342   Register dst_;
   5343   Register receiver_;
   5344   Handle<String> name_;
   5345 };
   5346 
   5347 
   5348 void DeferredReferenceGetNamedValue::Generate() {
   5349   __ push(receiver_);
   5350   __ Move(rcx, name_);
   5351   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   5352   __ Call(ic, RelocInfo::CODE_TARGET);
   5353   // The call must be followed by a test rax instruction to indicate
   5354   // that the inobject property case was inlined.
   5355   //
   5356   // Store the delta to the map check instruction here in the test
   5357   // instruction.  Use masm_-> instead of the __ macro since the
   5358   // latter can't return a value.
   5359   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   5360   // Here we use masm_-> instead of the __ macro because this is the
   5361   // instruction that gets patched and coverage code gets in the way.
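             // For example, if 12 bytes of code have been emitted since the patch
             // site, the immediate below is -12; the (negated) delta is what later
             // allows the inlined map check instruction to be located and patched.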
   5362   masm_->testl(rax, Immediate(-delta_to_patch_site));
   5363   __ IncrementCounter(&Counters::named_load_inline_miss, 1);
   5364 
   5365   if (!dst_.is(rax)) __ movq(dst_, rax);
   5366   __ pop(receiver_);
   5367 }
   5368 
   5369 
   5370 void DeferredInlineSmiAdd::Generate() {
   5371   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
   5372   igostub.GenerateCall(masm_, dst_, value_);
   5373   if (!dst_.is(rax)) __ movq(dst_, rax);
   5374 }
   5375 
   5376 
   5377 void DeferredInlineSmiAddReversed::Generate() {
   5378   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
   5379   igostub.GenerateCall(masm_, value_, dst_);
   5380   if (!dst_.is(rax)) __ movq(dst_, rax);
   5381 }
   5382 
   5383 
   5384 void DeferredInlineSmiSub::Generate() {
   5385   GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
   5386   igostub.GenerateCall(masm_, dst_, value_);
   5387   if (!dst_.is(rax)) __ movq(dst_, rax);
   5388 }
   5389 
   5390 
   5391 void DeferredInlineSmiOperation::Generate() {
   5392   // For mod we don't generate all the Smi code inline.
   5393   GenericBinaryOpStub stub(
   5394       op_,
   5395       overwrite_mode_,
   5396       (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
   5397   stub.GenerateCall(masm_, src_, value_);
   5398   if (!dst_.is(rax)) __ movq(dst_, rax);
   5399 }
   5400 
   5401 
   5402 Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
   5403                                                  Result* operand,
   5404                                                  Handle<Object> value,
   5405                                                  StaticType* type,
   5406                                                  bool reversed,
   5407                                                  OverwriteMode overwrite_mode) {
   5408   // NOTE: This is an attempt to inline (a bit) more of the code for
   5409   // some possible smi operations (like + and -) when (at least) one
   5410   // of the operands is a constant smi.
   5411   // Consumes the argument "operand".
   5412 
   5413   // TODO(199): Optimize some special cases of operations involving a
   5414   // smi literal (multiply by 2, shift by 0, etc.).
   5415   if (IsUnsafeSmi(value)) {
   5416     Result unsafe_operand(value);
   5417     if (reversed) {
   5418       return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
    5419                                       overwrite_mode);
   5420     } else {
   5421       return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
    5422                                       overwrite_mode);
   5423     }
   5424   }
   5425 
   5426   // Get the literal value.
   5427   Smi* smi_value = Smi::cast(*value);
   5428   int int_value = smi_value->value();
   5429 
   5430   Result answer;
   5431   switch (op) {
   5432     case Token::ADD: {
   5433       operand->ToRegister();
   5434       frame_->Spill(operand->reg());
   5435       DeferredCode* deferred = NULL;
   5436       if (reversed) {
   5437         deferred = new DeferredInlineSmiAddReversed(operand->reg(),
   5438                                                     smi_value,
   5439                                                     overwrite_mode);
   5440       } else {
   5441         deferred = new DeferredInlineSmiAdd(operand->reg(),
   5442                                             smi_value,
   5443                                             overwrite_mode);
   5444       }
   5445       __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5446       __ SmiAddConstant(operand->reg(),
   5447                         operand->reg(),
   5448                         smi_value,
   5449                         deferred->entry_label());
   5450       deferred->BindExit();
   5451       answer = *operand;
   5452       break;
   5453     }
   5454 
   5455     case Token::SUB: {
   5456       if (reversed) {
   5457         Result constant_operand(value);
   5458         answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
   5459                                           overwrite_mode);
   5460       } else {
   5461         operand->ToRegister();
   5462         frame_->Spill(operand->reg());
   5463         DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
   5464                                                           smi_value,
   5465                                                           overwrite_mode);
   5466         __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5467         // A smi currently fits in a 32-bit Immediate.
   5468         __ SmiSubConstant(operand->reg(),
   5469                           operand->reg(),
   5470                           smi_value,
   5471                           deferred->entry_label());
   5472         deferred->BindExit();
   5473         answer = *operand;
   5474       }
   5475       break;
   5476     }
   5477 
   5478     case Token::SAR:
   5479       if (reversed) {
   5480         Result constant_operand(value);
   5481         answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
   5482                                           overwrite_mode);
   5483       } else {
   5484         // Only the least significant 5 bits of the shift value are used.
   5485         // In the slow case, this masking is done inside the runtime call.
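                 // For example, a shift count of 33 behaves exactly like a shift
                 // count of 1, matching the ECMAScript shift operator semantics.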
   5486         int shift_value = int_value & 0x1f;
   5487         operand->ToRegister();
   5488         frame_->Spill(operand->reg());
   5489         DeferredInlineSmiOperation* deferred =
   5490             new DeferredInlineSmiOperation(op,
   5491                                            operand->reg(),
   5492                                            operand->reg(),
   5493                                            smi_value,
   5494                                            overwrite_mode);
   5495         __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5496         __ SmiShiftArithmeticRightConstant(operand->reg(),
   5497                                            operand->reg(),
   5498                                            shift_value);
   5499         deferred->BindExit();
   5500         answer = *operand;
   5501       }
   5502       break;
   5503 
   5504     case Token::SHR:
   5505       if (reversed) {
   5506         Result constant_operand(value);
   5507         answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
   5508                                           overwrite_mode);
   5509       } else {
   5510         // Only the least significant 5 bits of the shift value are used.
   5511         // In the slow case, this masking is done inside the runtime call.
   5512         int shift_value = int_value & 0x1f;
   5513         operand->ToRegister();
   5514         answer = allocator()->Allocate();
   5515         ASSERT(answer.is_valid());
   5516         DeferredInlineSmiOperation* deferred =
   5517             new DeferredInlineSmiOperation(op,
   5518                                            answer.reg(),
   5519                                            operand->reg(),
   5520                                            smi_value,
   5521                                            overwrite_mode);
   5522         __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5523         __ SmiShiftLogicalRightConstant(answer.reg(),
   5524                                         operand->reg(),
   5525                                         shift_value,
   5526                                         deferred->entry_label());
   5527         deferred->BindExit();
   5528         operand->Unuse();
   5529       }
   5530       break;
   5531 
   5532     case Token::SHL:
   5533       if (reversed) {
   5534         Result constant_operand(value);
   5535         answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
   5536                                           overwrite_mode);
   5537       } else {
   5538         // Only the least significant 5 bits of the shift value are used.
   5539         // In the slow case, this masking is done inside the runtime call.
   5540         int shift_value = int_value & 0x1f;
   5541         operand->ToRegister();
   5542         if (shift_value == 0) {
   5543           // Spill operand so it can be overwritten in the slow case.
   5544           frame_->Spill(operand->reg());
   5545           DeferredInlineSmiOperation* deferred =
   5546               new DeferredInlineSmiOperation(op,
   5547                                              operand->reg(),
   5548                                              operand->reg(),
   5549                                              smi_value,
   5550                                              overwrite_mode);
   5551           __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5552           deferred->BindExit();
   5553           answer = *operand;
   5554         } else {
   5555           // Use a fresh temporary for nonzero shift values.
   5556           answer = allocator()->Allocate();
   5557           ASSERT(answer.is_valid());
   5558           DeferredInlineSmiOperation* deferred =
   5559               new DeferredInlineSmiOperation(op,
   5560                                              answer.reg(),
   5561                                              operand->reg(),
   5562                                              smi_value,
   5563                                              overwrite_mode);
   5564           __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5565           __ SmiShiftLeftConstant(answer.reg(),
   5566                                   operand->reg(),
   5567                                   shift_value,
   5568                                   deferred->entry_label());
   5569           deferred->BindExit();
   5570           operand->Unuse();
   5571         }
   5572       }
   5573       break;
   5574 
   5575     case Token::BIT_OR:
   5576     case Token::BIT_XOR:
   5577     case Token::BIT_AND: {
   5578       operand->ToRegister();
   5579       frame_->Spill(operand->reg());
   5580       if (reversed) {
   5581         // Bit operations with a constant smi are commutative.
   5582         // We can swap left and right operands with no problem.
   5583         // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
   5584         overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
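                 // Assuming the usual ordering NO_OVERWRITE, OVERWRITE_LEFT,
                 // OVERWRITE_RIGHT, the formula maps 1 to (2 * 1) % 3 == 2 and
                 // 2 to (2 * 2) % 3 == 1, while leaving 0 unchanged.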
   5585       }
    5586       DeferredCode* deferred = new DeferredInlineSmiOperation(op,
    5587                                                               operand->reg(),
    5588                                                               operand->reg(),
    5589                                                               smi_value,
    5590                                                               overwrite_mode);
   5591       __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
   5592       if (op == Token::BIT_AND) {
   5593         __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
   5594       } else if (op == Token::BIT_XOR) {
   5595         if (int_value != 0) {
   5596           __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
   5597         }
   5598       } else {
   5599         ASSERT(op == Token::BIT_OR);
   5600         if (int_value != 0) {
   5601           __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
   5602         }
   5603       }
   5604       deferred->BindExit();
   5605       answer = *operand;
   5606       break;
   5607     }
   5608 
   5609     // Generate inline code for mod of powers of 2 and negative powers of 2.
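             // For a non-negative smi x, x % 2^k is just x & (2^k - 1); for example
             // x % 8 becomes x & 7.  The same mask works for a negative power of two
             // on the right, since a non-negative dividend keeps its sign under %.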
   5610     case Token::MOD:
   5611       if (!reversed &&
   5612           int_value != 0 &&
   5613           (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
   5614         operand->ToRegister();
   5615         frame_->Spill(operand->reg());
   5616         DeferredCode* deferred =
   5617             new DeferredInlineSmiOperation(op,
   5618                                            operand->reg(),
   5619                                            operand->reg(),
   5620                                            smi_value,
   5621                                            overwrite_mode);
   5622         // Check for negative or non-Smi left hand side.
   5623         __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
   5624         if (int_value < 0) int_value = -int_value;
   5625         if (int_value == 1) {
   5626           __ Move(operand->reg(), Smi::FromInt(0));
   5627         } else {
   5628           __ SmiAndConstant(operand->reg(),
   5629                             operand->reg(),
   5630                             Smi::FromInt(int_value - 1));
   5631         }
   5632         deferred->BindExit();
   5633         answer = *operand;
   5634         break;  // This break only applies if we generated code for MOD.
   5635       }
   5636       // Fall through if we did not find a power of 2 on the right hand side!
   5637       // The next case must be the default.
   5638 
   5639     default: {
   5640       Result constant_operand(value);
   5641       if (reversed) {
   5642         answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
   5643                                           overwrite_mode);
   5644       } else {
   5645         answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
   5646                                           overwrite_mode);
   5647       }
   5648       break;
   5649     }
   5650   }
   5651   ASSERT(answer.is_valid());
   5652   return answer;
   5653 }
   5654 
   5655 Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
   5656                                                Result* left,
   5657                                                Result* right,
   5658                                                OverwriteMode overwrite_mode) {
   5659   Result answer;
   5660   // Special handling of div and mod because they use fixed registers.
   5661   if (op == Token::DIV || op == Token::MOD) {
   5662     // We need rax as the quotient register, rdx as the remainder
   5663     // register, neither left nor right in rax or rdx, and left copied
   5664     // to rax.
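             // This mirrors the x64 idiv instruction, which takes its dividend in
             // rdx:rax and produces the quotient in rax and the remainder in rdx.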
   5665     Result quotient;
   5666     Result remainder;
   5667     bool left_is_in_rax = false;
   5668     // Step 1: get rax for quotient.
   5669     if ((left->is_register() && left->reg().is(rax)) ||
   5670         (right->is_register() && right->reg().is(rax))) {
   5671       // One or both is in rax.  Use a fresh non-rdx register for
   5672       // them.
   5673       Result fresh = allocator_->Allocate();
   5674       ASSERT(fresh.is_valid());
   5675       if (fresh.reg().is(rdx)) {
   5676         remainder = fresh;
   5677         fresh = allocator_->Allocate();
   5678         ASSERT(fresh.is_valid());
   5679       }
   5680       if (left->is_register() && left->reg().is(rax)) {
   5681         quotient = *left;
   5682         *left = fresh;
   5683         left_is_in_rax = true;
   5684       }
   5685       if (right->is_register() && right->reg().is(rax)) {
   5686         quotient = *right;
   5687         *right = fresh;
   5688       }
   5689       __ movq(fresh.reg(), rax);
   5690     } else {
   5691       // Neither left nor right is in rax.
   5692       quotient = allocator_->Allocate(rax);
   5693     }
   5694     ASSERT(quotient.is_register() && quotient.reg().is(rax));
   5695     ASSERT(!(left->is_register() && left->reg().is(rax)));
   5696     ASSERT(!(right->is_register() && right->reg().is(rax)));
   5697 
   5698     // Step 2: get rdx for remainder if necessary.
   5699     if (!remainder.is_valid()) {
   5700       if ((left->is_register() && left->reg().is(rdx)) ||
   5701           (right->is_register() && right->reg().is(rdx))) {
   5702         Result fresh = allocator_->Allocate();
   5703         ASSERT(fresh.is_valid());
   5704         if (left->is_register() && left->reg().is(rdx)) {
   5705           remainder = *left;
   5706           *left = fresh;
   5707         }
   5708         if (right->is_register() && right->reg().is(rdx)) {
   5709           remainder = *right;
   5710           *right = fresh;
   5711         }
   5712         __ movq(fresh.reg(), rdx);
   5713       } else {
   5714         // Neither left nor right is in rdx.
   5715         remainder = allocator_->Allocate(rdx);
   5716       }
   5717     }
   5718     ASSERT(remainder.is_register() && remainder.reg().is(rdx));
   5719     ASSERT(!(left->is_register() && left->reg().is(rdx)));
   5720     ASSERT(!(right->is_register() && right->reg().is(rdx)));
   5721 
   5722     left->ToRegister();
   5723     right->ToRegister();
   5724     frame_->Spill(rax);
   5725     frame_->Spill(rdx);
   5726 
   5727     // Check that left and right are smi tagged.
   5728     DeferredInlineBinaryOperation* deferred =
   5729         new DeferredInlineBinaryOperation(op,
   5730                                           (op == Token::DIV) ? rax : rdx,
   5731                                           left->reg(),
   5732                                           right->reg(),
   5733                                           overwrite_mode);
   5734     __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
   5735 
   5736     if (op == Token::DIV) {
   5737       __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
   5738       deferred->BindExit();
   5739       left->Unuse();
   5740       right->Unuse();
   5741       answer = quotient;
   5742     } else {
   5743       ASSERT(op == Token::MOD);
   5744       __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
   5745       deferred->BindExit();
   5746       left->Unuse();
   5747       right->Unuse();
   5748       answer = remainder;
   5749     }
   5750     ASSERT(answer.is_valid());
   5751     return answer;
   5752   }
   5753 
   5754   // Special handling of shift operations because they use fixed
   5755   // registers.
   5756   if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
   5757     // Move left out of rcx if necessary.
   5758     if (left->is_register() && left->reg().is(rcx)) {
   5759       *left = allocator_->Allocate();
   5760       ASSERT(left->is_valid());
   5761       __ movq(left->reg(), rcx);
   5762     }
   5763     right->ToRegister(rcx);
   5764     left->ToRegister();
   5765     ASSERT(left->is_register() && !left->reg().is(rcx));
   5766     ASSERT(right->is_register() && right->reg().is(rcx));
   5767 
    5768     // We will modify right, so it must be spilled.
   5769     frame_->Spill(rcx);
   5770 
   5771     // Use a fresh answer register to avoid spilling the left operand.
   5772     answer = allocator_->Allocate();
   5773     ASSERT(answer.is_valid());
   5774     // Check that both operands are smis using the answer register as a
   5775     // temporary.
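             // OR-ing the operands is a combined check: any non-zero smi tag bits
             // in either value survive the OR, so the result is smi-tagged only if
             // both operands are smis.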
   5776     DeferredInlineBinaryOperation* deferred =
   5777         new DeferredInlineBinaryOperation(op,
   5778                                           answer.reg(),
   5779                                           left->reg(),
   5780                                           rcx,
   5781                                           overwrite_mode);
   5782     __ movq(answer.reg(), left->reg());
   5783     __ or_(answer.reg(), rcx);
   5784     __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
   5785 
   5786     // Perform the operation.
   5787     switch (op) {
   5788       case Token::SAR:
   5789         __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
   5790         break;
   5791       case Token::SHR: {
   5792         __ SmiShiftLogicalRight(answer.reg(),
    5793                                 left->reg(),
    5794                                 rcx,
    5795                                 deferred->entry_label());
   5796         break;
   5797       }
   5798       case Token::SHL: {
   5799         __ SmiShiftLeft(answer.reg(),
   5800                         left->reg(),
   5801                         rcx,
   5802                         deferred->entry_label());
   5803         break;
   5804       }
   5805       default:
   5806         UNREACHABLE();
   5807     }
   5808     deferred->BindExit();
   5809     left->Unuse();
   5810     right->Unuse();
   5811     ASSERT(answer.is_valid());
   5812     return answer;
   5813   }
   5814 
   5815   // Handle the other binary operations.
   5816   left->ToRegister();
   5817   right->ToRegister();
   5818   // A newly allocated register answer is used to hold the answer.  The
   5819   // registers containing left and right are not modified so they don't
   5820   // need to be spilled in the fast case.
   5821   answer = allocator_->Allocate();
   5822   ASSERT(answer.is_valid());
   5823 
   5824   // Perform the smi tag check.
   5825   DeferredInlineBinaryOperation* deferred =
   5826       new DeferredInlineBinaryOperation(op,
   5827                                         answer.reg(),
   5828                                         left->reg(),
   5829                                         right->reg(),
   5830                                         overwrite_mode);
   5831   __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
   5832 
   5833   switch (op) {
   5834     case Token::ADD:
   5835       __ SmiAdd(answer.reg(),
   5836                 left->reg(),
   5837                 right->reg(),
   5838                 deferred->entry_label());
   5839       break;
   5840 
   5841     case Token::SUB:
   5842       __ SmiSub(answer.reg(),
   5843                 left->reg(),
   5844                 right->reg(),
   5845                 deferred->entry_label());
   5846       break;
   5847 
   5848     case Token::MUL: {
   5849       __ SmiMul(answer.reg(),
   5850                 left->reg(),
   5851                 right->reg(),
   5852                 deferred->entry_label());
   5853       break;
   5854     }
   5855 
   5856     case Token::BIT_OR:
   5857       __ SmiOr(answer.reg(), left->reg(), right->reg());
   5858       break;
   5859 
   5860     case Token::BIT_AND:
   5861       __ SmiAnd(answer.reg(), left->reg(), right->reg());
   5862       break;
   5863 
   5864     case Token::BIT_XOR:
   5865       __ SmiXor(answer.reg(), left->reg(), right->reg());
   5866       break;
   5867 
   5868     default:
   5869       UNREACHABLE();
   5870       break;
   5871   }
   5872   deferred->BindExit();
   5873   left->Unuse();
   5874   right->Unuse();
   5875   ASSERT(answer.is_valid());
   5876   return answer;
   5877 }
   5878 
   5879 
   5880 Result CodeGenerator::EmitKeyedLoad(bool is_global) {
   5881   Comment cmnt(masm_, "[ Load from keyed Property");
   5882   // Inline array load code if inside of a loop.  We do not know
   5883   // the receiver map yet, so we initially generate the code with
   5884   // a check against an invalid map.  In the inline cache code, we
   5885   // patch the map check if appropriate.
   5886   if (loop_nesting() > 0) {
   5887     Comment cmnt(masm_, "[ Inlined load from keyed Property");
   5888 
   5889     Result key = frame_->Pop();
   5890     Result receiver = frame_->Pop();
   5891     key.ToRegister();
   5892     receiver.ToRegister();
   5893 
   5894     // Use a fresh temporary to load the elements without destroying
   5895     // the receiver which is needed for the deferred slow case.
   5896     Result elements = allocator()->Allocate();
   5897     ASSERT(elements.is_valid());
   5898 
   5899     // Use a fresh temporary for the index and later the loaded
   5900     // value.
   5901     Result index = allocator()->Allocate();
   5902     ASSERT(index.is_valid());
   5903 
   5904     DeferredReferenceGetKeyedValue* deferred =
   5905         new DeferredReferenceGetKeyedValue(index.reg(),
   5906                                            receiver.reg(),
   5907                                            key.reg(),
   5908                                            is_global);
   5909 
   5910     // Check that the receiver is not a smi (only needed if this
   5911     // is not a load from the global context) and that it has the
   5912     // expected map.
   5913     if (!is_global) {
   5914       __ JumpIfSmi(receiver.reg(), deferred->entry_label());
   5915     }
   5916 
   5917     // Initially, use an invalid map. The map is patched in the IC
   5918     // initialization code.
   5919     __ bind(deferred->patch_site());
    5920     // Use masm_-> here instead of the double underscore macro since extra
   5921     // coverage code can interfere with the patching.  Do not use
   5922     // root array to load null_value, since it must be patched with
   5923     // the expected receiver map.
   5924     masm_->movq(kScratchRegister, Factory::null_value(),
   5925                 RelocInfo::EMBEDDED_OBJECT);
   5926     masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
   5927                 kScratchRegister);
   5928     deferred->Branch(not_equal);
   5929 
   5930     // Check that the key is a non-negative smi.
   5931     __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
   5932 
   5933     // Get the elements array from the receiver and check that it
   5934     // is not a dictionary.
   5935     __ movq(elements.reg(),
   5936             FieldOperand(receiver.reg(), JSObject::kElementsOffset));
   5937     __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
   5938            Factory::fixed_array_map());
   5939     deferred->Branch(not_equal);
   5940 
   5941     // Shift the key to get the actual index value and check that
   5942     // it is within bounds.
   5943     __ SmiToInteger32(index.reg(), key.reg());
   5944     __ cmpl(index.reg(),
   5945             FieldOperand(elements.reg(), FixedArray::kLengthOffset));
   5946     deferred->Branch(above_equal);
   5947 
   5948     // The index register holds the un-smi-tagged key. It has been
    5949     // zero-extended to 64 bits, so it can be used directly as an index in the
   5950     // operand below.
   5951     // Load and check that the result is not the hole.  We could
   5952     // reuse the index or elements register for the value.
   5953     //
   5954     // TODO(206): Consider whether it makes sense to try some
   5955     // heuristic about which register to reuse.  For example, if
    5956     // one is rax, then we can reuse that one because the value
   5957     // coming from the deferred code will be in rax.
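             // The operand below addresses elements + index * kPointerSize +
             // (FixedArray::kHeaderSize - kHeapObjectTag), i.e. it skips the tagged
             // array header and scales the untagged index by the pointer size.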
   5958     Result value = index;
   5959     __ movq(value.reg(),
   5960             Operand(elements.reg(),
   5961                     index.reg(),
   5962                     times_pointer_size,
   5963                     FixedArray::kHeaderSize - kHeapObjectTag));
   5964     elements.Unuse();
   5965     index.Unuse();
   5966     __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
   5967     deferred->Branch(equal);
   5968     __ IncrementCounter(&Counters::keyed_load_inline, 1);
   5969 
   5970     deferred->BindExit();
   5971     // Restore the receiver and key to the frame and push the
   5972     // result on top of it.
   5973     frame_->Push(&receiver);
   5974     frame_->Push(&key);
   5975     return value;
   5976 
   5977   } else {
   5978     Comment cmnt(masm_, "[ Load from keyed Property");
   5979     RelocInfo::Mode mode = is_global
   5980         ? RelocInfo::CODE_TARGET_CONTEXT
   5981         : RelocInfo::CODE_TARGET;
   5982     Result answer = frame_->CallKeyedLoadIC(mode);
   5983     // Make sure that we do not have a test instruction after the
   5984     // call.  A test instruction after the call is used to
   5985     // indicate that we have generated an inline version of the
   5986     // keyed load.  The explicit nop instruction is here because
   5987     // the push that follows might be peep-hole optimized away.
   5988     __ nop();
   5989     return answer;
   5990   }
   5991 }
   5992 
   5993 
   5994 #undef __
   5995 #define __ ACCESS_MASM(masm)
   5996 
   5997 
   5998 Handle<String> Reference::GetName() {
   5999   ASSERT(type_ == NAMED);
   6000   Property* property = expression_->AsProperty();
   6001   if (property == NULL) {
   6002     // Global variable reference treated as a named property reference.
   6003     VariableProxy* proxy = expression_->AsVariableProxy();
   6004     ASSERT(proxy->AsVariable() != NULL);
   6005     ASSERT(proxy->AsVariable()->is_global());
   6006     return proxy->name();
   6007   } else {
   6008     Literal* raw_name = property->key()->AsLiteral();
   6009     ASSERT(raw_name != NULL);
   6010     return Handle<String>(String::cast(*raw_name->handle()));
   6011   }
   6012 }
   6013 
   6014 
   6015 void Reference::GetValue() {
   6016   ASSERT(!cgen_->in_spilled_code());
   6017   ASSERT(cgen_->HasValidEntryRegisters());
   6018   ASSERT(!is_illegal());
   6019   MacroAssembler* masm = cgen_->masm();
   6020 
   6021   // Record the source position for the property load.
   6022   Property* property = expression_->AsProperty();
   6023   if (property != NULL) {
   6024     cgen_->CodeForSourcePosition(property->position());
   6025   }
   6026 
   6027   switch (type_) {
   6028     case SLOT: {
   6029       Comment cmnt(masm, "[ Load from Slot");
   6030       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
   6031       ASSERT(slot != NULL);
   6032       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
   6033       break;
   6034     }
   6035 
   6036     case NAMED: {
   6037       Variable* var = expression_->AsVariableProxy()->AsVariable();
   6038       bool is_global = var != NULL;
   6039       ASSERT(!is_global || var->is_global());
   6040 
   6041       // Do not inline the inobject property case for loads from the global
   6042       // object.  Also do not inline for unoptimized code.  This saves time
   6043       // in the code generator.  Unoptimized code is toplevel code or code
   6044       // that is not in a loop.
   6045       if (is_global ||
   6046           cgen_->scope()->is_global_scope() ||
   6047           cgen_->loop_nesting() == 0) {
   6048         Comment cmnt(masm, "[ Load from named Property");
   6049         cgen_->frame()->Push(GetName());
   6050 
   6051         RelocInfo::Mode mode = is_global
   6052                                ? RelocInfo::CODE_TARGET_CONTEXT
   6053                                : RelocInfo::CODE_TARGET;
   6054         Result answer = cgen_->frame()->CallLoadIC(mode);
   6055         // A test rax instruction following the call signals that the
   6056         // inobject property case was inlined.  Ensure that there is not
   6057         // a test rax instruction here.
   6058         __ nop();
   6059         cgen_->frame()->Push(&answer);
   6060       } else {
   6061         // Inline the inobject property case.
   6062         Comment cmnt(masm, "[ Inlined named property load");
   6063         Result receiver = cgen_->frame()->Pop();
   6064         receiver.ToRegister();
   6065         Result value = cgen_->allocator()->Allocate();
   6066         ASSERT(value.is_valid());
   6067         // Cannot use r12 for receiver, because that changes
   6068         // the distance between a call and a fixup location,
   6069         // due to a special encoding of r12 as r/m in a ModR/M byte.
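                 // (Like rsp, r12 in the r/m field forces a SIB byte, so memory
                 // operands based on r12 are one byte longer than for other
                 // registers.)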
   6070         if (receiver.reg().is(r12)) {
   6071           // Swap receiver and value.
   6072           __ movq(value.reg(), receiver.reg());
   6073           Result temp = receiver;
   6074           receiver = value;
   6075           value = temp;
   6076           cgen_->frame()->Spill(value.reg());  // r12 may have been shared.
   6077         }
   6078 
   6079         DeferredReferenceGetNamedValue* deferred =
   6080             new DeferredReferenceGetNamedValue(value.reg(),
   6081                                                receiver.reg(),
   6082                                                GetName());
   6083 
   6084         // Check that the receiver is a heap object.
   6085         __ JumpIfSmi(receiver.reg(), deferred->entry_label());
   6086 
   6087         __ bind(deferred->patch_site());
   6088         // This is the map check instruction that will be patched (so we can't
   6089         // use the double underscore macro that may insert instructions).
   6090         // Initially use an invalid map to force a failure.
   6091         masm->Move(kScratchRegister, Factory::null_value());
   6092         masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
   6093                    kScratchRegister);
   6094         // This branch is always a forwards branch so it's always a fixed
   6095         // size which allows the assert below to succeed and patching to work.
   6096         // Don't use deferred->Branch(...), since that might add coverage code.
   6097         masm->j(not_equal, deferred->entry_label());
   6098 
   6099         // The delta from the patch label to the load offset must be
   6100         // statically known.
   6101         ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
   6102                LoadIC::kOffsetToLoadInstruction);
   6103         // The initial (invalid) offset has to be large enough to force
   6104         // a 32-bit instruction encoding to allow patching with an
   6105         // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
   6106         int offset = kMaxInt;
   6107         masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
   6108 
   6109         __ IncrementCounter(&Counters::named_load_inline, 1);
   6110         deferred->BindExit();
   6111         cgen_->frame()->Push(&receiver);
   6112         cgen_->frame()->Push(&value);
   6113       }
   6114       break;
   6115     }
   6116 
   6117     case KEYED: {
   6118       Comment cmnt(masm, "[ Load from keyed Property");
   6119       Variable* var = expression_->AsVariableProxy()->AsVariable();
   6120       bool is_global = var != NULL;
   6121       ASSERT(!is_global || var->is_global());
   6122 
   6123       Result value = cgen_->EmitKeyedLoad(is_global);
   6124       cgen_->frame()->Push(&value);
   6125       break;
   6126     }
   6127 
   6128     default:
   6129       UNREACHABLE();
   6130   }
   6131 
   6132   if (!persist_after_get_) {
   6133     cgen_->UnloadReference(this);
   6134   }
   6135 }
   6136 
   6137 
   6138 void Reference::TakeValue() {
   6139   // TODO(X64): This function is completely architecture independent. Move
   6140   // it somewhere shared.
   6141 
   6142   // For non-constant frame-allocated slots, we invalidate the value in the
   6143   // slot.  For all others, we fall back on GetValue.
   6144   ASSERT(!cgen_->in_spilled_code());
   6145   ASSERT(!is_illegal());
   6146   if (type_ != SLOT) {
   6147     GetValue();
   6148     return;
   6149   }
   6150 
   6151   Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
   6152   ASSERT(slot != NULL);
   6153   if (slot->type() == Slot::LOOKUP ||
   6154       slot->type() == Slot::CONTEXT ||
   6155       slot->var()->mode() == Variable::CONST ||
   6156       slot->is_arguments()) {
   6157     GetValue();
   6158     return;
   6159   }
   6160 
   6161   // Only non-constant, frame-allocated parameters and locals can reach
   6162   // here.  Be careful not to use the optimizations for arguments
   6163   // object access since it may not have been initialized yet.
   6164   ASSERT(!slot->is_arguments());
   6165   if (slot->type() == Slot::PARAMETER) {
   6166     cgen_->frame()->TakeParameterAt(slot->index());
   6167   } else {
   6168     ASSERT(slot->type() == Slot::LOCAL);
   6169     cgen_->frame()->TakeLocalAt(slot->index());
   6170   }
   6171 
   6172   ASSERT(persist_after_get_);
   6173   // Do not unload the reference, because it is used in SetValue.
   6174 }
   6175 
   6176 
   6177 void Reference::SetValue(InitState init_state) {
   6178   ASSERT(cgen_->HasValidEntryRegisters());
   6179   ASSERT(!is_illegal());
   6180   MacroAssembler* masm = cgen_->masm();
   6181   switch (type_) {
   6182     case SLOT: {
   6183       Comment cmnt(masm, "[ Store to Slot");
   6184       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
   6185       ASSERT(slot != NULL);
   6186       cgen_->StoreToSlot(slot, init_state);
   6187       cgen_->UnloadReference(this);
   6188       break;
   6189     }
   6190 
   6191     case NAMED: {
   6192       Comment cmnt(masm, "[ Store to named Property");
   6193       cgen_->frame()->Push(GetName());
   6194       Result answer = cgen_->frame()->CallStoreIC();
   6195       cgen_->frame()->Push(&answer);
   6196       set_unloaded();
   6197       break;
   6198     }
   6199 
   6200     case KEYED: {
   6201       Comment cmnt(masm, "[ Store to keyed Property");
   6202 
    6203       // Generate an inlined version of the keyed store if the code is in
   6204       // a loop and the key is likely to be a smi.
   6205       Property* property = expression()->AsProperty();
   6206       ASSERT(property != NULL);
   6207       StaticType* key_smi_analysis = property->key()->type();
   6208 
   6209       if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
   6210         Comment cmnt(masm, "[ Inlined store to keyed Property");
   6211 
   6212         // Get the receiver, key and value into registers.
   6213         Result value = cgen_->frame()->Pop();
   6214         Result key = cgen_->frame()->Pop();
   6215         Result receiver = cgen_->frame()->Pop();
   6216 
   6217         Result tmp = cgen_->allocator_->Allocate();
   6218         ASSERT(tmp.is_valid());
   6219 
   6220         // Determine whether the value is a constant before putting it
   6221         // in a register.
   6222         bool value_is_constant = value.is_constant();
   6223 
   6224         // Make sure that value, key and receiver are in registers.
   6225         value.ToRegister();
   6226         key.ToRegister();
   6227         receiver.ToRegister();
   6228 
   6229         DeferredReferenceSetKeyedValue* deferred =
   6230             new DeferredReferenceSetKeyedValue(value.reg(),
   6231                                                key.reg(),
   6232                                                receiver.reg());
   6233 
   6234         // Check that the value is a smi if it is not a constant.
   6235         // We can skip the write barrier for smis and constants.
   6236         if (!value_is_constant) {
   6237           __ JumpIfNotSmi(value.reg(), deferred->entry_label());
   6238         }
   6239 
   6240         // Check that the key is a non-negative smi.
   6241         __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
   6242 
   6243         // Check that the receiver is not a smi.
   6244         __ JumpIfSmi(receiver.reg(), deferred->entry_label());
   6245 
   6246         // Check that the receiver is a JSArray.
   6247         __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
   6248         deferred->Branch(not_equal);
   6249 
   6250         // Check that the key is within bounds.  Both the key and the
   6251         // length of the JSArray are smis.
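                 // The deferred (IC) path is taken when length <= key, i.e. when
                 // the store would land at or beyond the current end of the array.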
   6252         __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
   6253                       key.reg());
   6254         deferred->Branch(less_equal);
   6255 
   6256         // Get the elements array from the receiver and check that it
   6257         // is a flat array (not a dictionary).
   6258         __ movq(tmp.reg(),
   6259                 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
   6260         // Bind the deferred code patch site to be able to locate the
   6261         // fixed array map comparison.  When debugging, we patch this
   6262         // comparison to always fail so that we will hit the IC call
   6263         // in the deferred code which will allow the debugger to
   6264         // break for fast case stores.
   6265         __ bind(deferred->patch_site());
   6266         // Avoid using __ to ensure the distance from patch_site
   6267         // to the map address is always the same.
   6268         masm->movq(kScratchRegister, Factory::fixed_array_map(),
   6269                    RelocInfo::EMBEDDED_OBJECT);
   6270         __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
   6271                 kScratchRegister);
   6272         deferred->Branch(not_equal);
   6273 
   6274         // Store the value.
   6275         SmiIndex index =
   6276             masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
    6277         __ movq(Operand(tmp.reg(),
   6278                         index.reg,
   6279                         index.scale,
   6280                         FixedArray::kHeaderSize - kHeapObjectTag),
   6281                 value.reg());
   6282         __ IncrementCounter(&Counters::keyed_store_inline, 1);
   6283 
   6284         deferred->BindExit();
   6285 
   6286         cgen_->frame()->Push(&receiver);
   6287         cgen_->frame()->Push(&key);
   6288         cgen_->frame()->Push(&value);
   6289       } else {
   6290         Result answer = cgen_->frame()->CallKeyedStoreIC();
   6291         // Make sure that we do not have a test instruction after the
   6292         // call.  A test instruction after the call is used to
   6293         // indicate that we have generated an inline version of the
   6294         // keyed store.
   6295         masm->nop();
   6296         cgen_->frame()->Push(&answer);
   6297       }
   6298       cgen_->UnloadReference(this);
   6299       break;
   6300     }
   6301 
   6302     default:
   6303       UNREACHABLE();
   6304   }
   6305 }
   6306 
   6307 
   6308 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   6309   // Clone the boilerplate in new space. Set the context to the
   6310   // current context in rsi.
   6311   Label gc;
   6312   __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
   6313 
   6314   // Get the boilerplate function from the stack.
   6315   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
   6316 
   6317   // Compute the function map in the current global context and set that
   6318   // as the map of the allocated object.
   6319   __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   6320   __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
   6321   __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   6322   __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
   6323 
   6324   // Clone the rest of the boilerplate fields. We don't have to update
   6325   // the write barrier because the allocated object is in new space.
   6326   for (int offset = kPointerSize;
   6327        offset < JSFunction::kSize;
   6328        offset += kPointerSize) {
   6329     if (offset == JSFunction::kContextOffset) {
   6330       __ movq(FieldOperand(rax, offset), rsi);
   6331     } else {
   6332       __ movq(rbx, FieldOperand(rdx, offset));
   6333       __ movq(FieldOperand(rax, offset), rbx);
   6334     }
   6335   }
   6336 
   6337   // Return and remove the on-stack parameter.
   6338   __ ret(1 * kPointerSize);
   6339 
   6340   // Create a new closure through the slower runtime call.
   6341   __ bind(&gc);
   6342   __ pop(rcx);  // Temporarily remove return address.
   6343   __ pop(rdx);
   6344   __ push(rsi);
   6345   __ push(rdx);
   6346   __ push(rcx);  // Restore return address.
   6347   __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
   6348 }
   6349 
   6350 
   6351 void FastNewContextStub::Generate(MacroAssembler* masm) {
   6352   // Try to allocate the context in new space.
   6353   Label gc;
   6354   int length = slots_ + Context::MIN_CONTEXT_SLOTS;
   6355   __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
   6356                         rax, rbx, rcx, &gc, TAG_OBJECT);
   6357 
   6358   // Get the function from the stack.
   6359   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
   6360 
   6361   // Setup the object header.
   6362   __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
   6363   __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
   6364   __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
   6365 
   6366   // Setup the fixed slots.
   6367   __ xor_(rbx, rbx);  // Set to NULL.
   6368   __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
   6369   __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
   6370   __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
   6371   __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
   6372 
   6373   // Copy the global object from the surrounding context.
   6374   __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   6375   __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
   6376 
   6377   // Initialize the rest of the slots to undefined.
   6378   __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
   6379   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
   6380     __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
   6381   }
   6382 
   6383   // Return and remove the on-stack parameter.
   6384   __ movq(rsi, rax);
   6385   __ ret(1 * kPointerSize);
   6386 
   6387   // Need to collect. Call into runtime system.
   6388   __ bind(&gc);
   6389   __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
   6390 }
   6391 
   6392 
   6393 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   6394   // Stack layout on entry:
   6395   //
   6396   // [rsp + kPointerSize]: constant elements.
   6397   // [rsp + (2 * kPointerSize)]: literal index.
   6398   // [rsp + (3 * kPointerSize)]: literals array.
   6399 
   6400   // All sizes here are multiples of kPointerSize.
   6401   int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
   6402   int size = JSArray::kSize + elements_size;
   6403 
   6404   // Load boilerplate object into rcx and check if we need to create a
   6405   // boilerplate.
   6406   Label slow_case;
   6407   __ movq(rcx, Operand(rsp, 3 * kPointerSize));
   6408   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   6409   SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
   6410   __ movq(rcx,
   6411           FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
   6412   __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
   6413   __ j(equal, &slow_case);
   6414 
   6415   // Allocate both the JS array and the elements array in one big
   6416   // allocation. This avoids multiple limit checks.
   6417   __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
   6418 
   6419   // Copy the JS array part.
   6420   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
   6421     if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
   6422       __ movq(rbx, FieldOperand(rcx, i));
   6423       __ movq(FieldOperand(rax, i), rbx);
   6424     }
   6425   }
   6426 
   6427   if (length_ > 0) {
    6428     // Get hold of the elements array of the boilerplate and set up the
   6429     // elements pointer in the resulting object.
   6430     __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
   6431     __ lea(rdx, Operand(rax, JSArray::kSize));
   6432     __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
   6433 
   6434     // Copy the elements array.
   6435     for (int i = 0; i < elements_size; i += kPointerSize) {
   6436       __ movq(rbx, FieldOperand(rcx, i));
   6437       __ movq(FieldOperand(rdx, i), rbx);
   6438     }
   6439   }
   6440 
   6441   // Return and remove the on-stack parameters.
   6442   __ ret(3 * kPointerSize);
   6443 
   6444   __ bind(&slow_case);
   6445   ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
   6446   __ TailCallRuntime(runtime, 3, 1);
   6447 }
   6448 
   6449 
   6450 void ToBooleanStub::Generate(MacroAssembler* masm) {
   6451   Label false_result, true_result, not_string;
   6452   __ movq(rax, Operand(rsp, 1 * kPointerSize));
   6453 
   6454   // 'null' => false.
   6455   __ CompareRoot(rax, Heap::kNullValueRootIndex);
   6456   __ j(equal, &false_result);
   6457 
   6458   // Get the map and type of the heap object.
   6459   // We don't use CmpObjectType because we manipulate the type field.
   6460   __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
   6461   __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
   6462 
   6463   // Undetectable => false.
   6464   __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
   6465   __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
   6466   __ j(not_zero, &false_result);
   6467 
   6468   // JavaScript object => true.
   6469   __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
   6470   __ j(above_equal, &true_result);
   6471 
   6472   // String value => false iff empty.
   6473   __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
   6474   __ j(above_equal, &not_string);
   6475   __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
   6476   __ testl(rdx, rdx);
   6477   __ j(zero, &false_result);
   6478   __ jmp(&true_result);
   6479 
   6480   __ bind(&not_string);
   6481   // HeapNumber => false iff +0, -0, or NaN.
   6482   // These three cases set C3 when compared to zero in the FPU.
   6483   __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
   6484   __ j(not_equal, &true_result);
    6485   __ fldz();  // Load zero onto fp stack.
    6486   // Load heap-number double value onto fp stack.
   6487   __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
   6488   __ FCmp();
   6489   __ j(zero, &false_result);
   6490   // Fall through to |true_result|.
   6491 
   6492   // Return 1/0 for true/false in rax.
   6493   __ bind(&true_result);
   6494   __ movq(rax, Immediate(1));
   6495   __ ret(1 * kPointerSize);
   6496   __ bind(&false_result);
   6497   __ xor_(rax, rax);
   6498   __ ret(1 * kPointerSize);
   6499 }
   6500 
   6501 
   6502 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
   6503   Object* answer_object = Heap::undefined_value();
   6504   switch (op) {
   6505     case Token::ADD:
   6506       // Use intptr_t to detect overflow of 32-bit int.
   6507       if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
   6508         answer_object = Smi::FromInt(left + right);
   6509       }
   6510       break;
   6511     case Token::SUB:
   6512       // Use intptr_t to detect overflow of 32-bit int.
   6513       if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
   6514         answer_object = Smi::FromInt(left - right);
   6515       }
   6516       break;
   6517     case Token::MUL: {
   6518         double answer = static_cast<double>(left) * right;
   6519         if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
   6520           // If the product is zero and the non-zero factor is negative,
   6521           // the spec requires us to return floating point negative zero.
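                   // For example, -3 * 0 must evaluate to -0.0, which a smi cannot
                   // represent; there answer == 0 and left + right == -3, so the
                   // fold is skipped and the generic code handles it.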
   6522           if (answer != 0 || (left + right) >= 0) {
   6523             answer_object = Smi::FromInt(static_cast<int>(answer));
   6524           }
   6525         }
   6526       }
   6527       break;
   6528     case Token::DIV:
   6529     case Token::MOD:
   6530       break;
   6531     case Token::BIT_OR:
   6532       answer_object = Smi::FromInt(left | right);
   6533       break;
   6534     case Token::BIT_AND:
   6535       answer_object = Smi::FromInt(left & right);
   6536       break;
   6537     case Token::BIT_XOR:
   6538       answer_object = Smi::FromInt(left ^ right);
   6539       break;
   6540 
   6541     case Token::SHL: {
   6542         int shift_amount = right & 0x1F;
   6543         if (Smi::IsValid(left << shift_amount)) {
   6544           answer_object = Smi::FromInt(left << shift_amount);
   6545         }
   6546         break;
   6547       }
   6548     case Token::SHR: {
   6549         int shift_amount = right & 0x1F;
   6550         unsigned int unsigned_left = left;
   6551         unsigned_left >>= shift_amount;
   6552         if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
   6553           answer_object = Smi::FromInt(unsigned_left);
   6554         }
   6555         break;
   6556       }
   6557     case Token::SAR: {
   6558         int shift_amount = right & 0x1F;
   6559         unsigned int unsigned_left = left;
   6560         if (left < 0) {
   6561           // Perform arithmetic shift of a negative number by
   6562           // complementing number, logical shifting, complementing again.
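                   // For example, left == -5 with shift 1: ~(-5) == 4, 4 >> 1 == 2,
                   // and ~2 == -3, matching the arithmetic result -5 >> 1 == -3.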
   6563           unsigned_left = ~unsigned_left;
   6564           unsigned_left >>= shift_amount;
   6565           unsigned_left = ~unsigned_left;
   6566         } else {
   6567           unsigned_left >>= shift_amount;
   6568         }
   6569         ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
   6570         answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
   6571         break;
   6572       }
   6573     default:
   6574       UNREACHABLE();
   6575       break;
   6576   }
   6577   if (answer_object == Heap::undefined_value()) {
   6578     return false;
   6579   }
   6580   frame_->Push(Handle<Object>(answer_object));
   6581   return true;
   6582 }
   6583 
   6584 
   6585 // End of CodeGenerator implementation.
   6586 
   6587 // Get the integer part of a heap number.  Surprisingly, all this bit twiddling
   6588 // is faster than using the built-in instructions on floating point registers.
   6589 // Trashes rdi and rbx.  Dest is rcx.  Source cannot be rcx or one of the
   6590 // trashed registers.
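         // An IEEE 754 double holds, from most to least significant bit, a sign bit,
         // 11 exponent bits biased by 1023 and 52 mantissa bits with an implicit
         // leading 1.  The code below works on the two 32-bit halves of that
         // representation: the "exponent word" contains the sign, the exponent and
         // the top 20 mantissa bits, while the "mantissa word" holds the low 32
         // mantissa bits.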
   6591 void IntegerConvert(MacroAssembler* masm,
   6592                     Register source,
   6593                     bool use_sse3,
   6594                     Label* conversion_failure) {
   6595   ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
   6596   Label done, right_exponent, normal_exponent;
   6597   Register scratch = rbx;
   6598   Register scratch2 = rdi;
   6599   // Get exponent word.
   6600   __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
   6601   // Get exponent alone in scratch2.
   6602   __ movl(scratch2, scratch);
   6603   __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
   6604   if (use_sse3) {
   6605     CpuFeatures::Scope scope(SSE3);
   6606     // Check whether the exponent is too big for a 64 bit signed integer.
   6607     static const uint32_t kTooBigExponent =
   6608         (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
   6609     __ cmpl(scratch2, Immediate(kTooBigExponent));
   6610     __ j(greater_equal, conversion_failure);
   6611     // Load x87 register with heap number.
   6612     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
   6613     // Reserve space for 64 bit answer.
   6614     __ subq(rsp, Immediate(sizeof(uint64_t)));  // Nolint.
   6615     // Do conversion, which cannot fail because we checked the exponent.
   6616     __ fisttp_d(Operand(rsp, 0));
   6617     __ movl(rcx, Operand(rsp, 0));  // Load low word of answer into rcx.
   6618     __ addq(rsp, Immediate(sizeof(uint64_t)));  // Nolint.
   6619   } else {
   6620     // Load rcx with zero.  We use this either for the final shift or
   6621     // for the answer.
   6622     __ xor_(rcx, rcx);
   6623     // Check whether the exponent matches a 32 bit signed int that cannot be
   6624     // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
   6625     // exponent is 30 (biased).  This is the exponent that we are fastest at and
   6626     // also the highest exponent we can handle here.
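             // For example, any value in [2^30, 2^31) is 1.xxx * 2^30, so its biased
             // exponent field is kExponentBias + 30 (1023 + 30 = 1053), which is what
             // non_smi_exponent encodes once shifted into place.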
   6627     const uint32_t non_smi_exponent =
   6628         (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
   6629     __ cmpl(scratch2, Immediate(non_smi_exponent));
   6630     // If we have a match of the int32-but-not-Smi exponent then skip some
   6631     // logic.
   6632     __ j(equal, &right_exponent);
   6633     // If the exponent is higher than that then go to slow case.  This catches
   6634     // numbers that don't fit in a signed int32, infinities and NaNs.
   6635     __ j(less, &normal_exponent);
   6636 
   6637     {
   6638       // Handle a big exponent.  The only reason we have this code is that the
   6639       // >>> operator has a tendency to generate numbers with an exponent of 31.
   6640       const uint32_t big_non_smi_exponent =
   6641           (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
   6642       __ cmpl(scratch2, Immediate(big_non_smi_exponent));
   6643       __ j(not_equal, conversion_failure);
   6644       // We have the big exponent, typically from >>>.  This means the number is
   6645       // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
   6646       __ movl(scratch2, scratch);
   6647       __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
   6648       // Put back the implicit 1.
   6649       __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
   6650       // Shift up the mantissa bits to take up the space the exponent used to
    6651       // take.  We just or'ed in the implicit bit, which takes care of one
    6652       // bit, and we want to use the full unsigned range, so we subtract 1
    6653       // from the shift distance.
   6654       const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
   6655       __ shl(scratch2, Immediate(big_shift_distance));
   6656       // Get the second half of the double.
   6657       __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
    6658       // Shift down 21 bits to get the most significant 11 bits of the low
   6659       // mantissa word.
   6660       __ shr(rcx, Immediate(32 - big_shift_distance));
   6661       __ or_(rcx, scratch2);
   6662       // We have the answer in rcx, but we may need to negate it.
   6663       __ testl(scratch, scratch);
   6664       __ j(positive, &done);
   6665       __ neg(rcx);
   6666       __ jmp(&done);
   6667     }
   6668 
   6669     __ bind(&normal_exponent);
   6670     // Exponent word in scratch, exponent part of exponent word in scratch2.
   6671     // Zero in rcx.
   6672     // We know the exponent is smaller than 30 (biased).  If it is less than
    6673     // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
   6674     // it rounds to zero.
   6675     const uint32_t zero_exponent =
   6676         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
   6677     __ subl(scratch2, Immediate(zero_exponent));
   6678     // rcx already has a Smi zero.
   6679     __ j(less, &done);
   6680 
   6681     // We have a shifted exponent between 0 and 30 in scratch2.
   6682     __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
   6683     __ movl(rcx, Immediate(30));
   6684     __ subl(rcx, scratch2);
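             // For example, a value in [16, 32) has unbiased exponent 4, so rcx
             // becomes 30 - 4 = 26; the mantissa assembled below with its implicit 1
             // at bit 30 is then shifted right by 26, leaving the 5-bit integer part.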
   6685 
   6686     __ bind(&right_exponent);
   6687     // Here rcx is the shift, scratch is the exponent word.
   6688     // Get the top bits of the mantissa.
   6689     __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
   6690     // Put back the implicit 1.
   6691     __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
   6692     // Shift up the mantissa bits to take up the space the exponent used to
    6693     // take. We have kExponentShift + 1 significant bits in the low end of the
   6694     // word.  Shift them to the top bits.
   6695     const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
   6696     __ shl(scratch, Immediate(shift_distance));
   6697     // Get the second half of the double. For some exponents we don't
   6698     // actually need this because the bits get shifted out again, but
   6699     // it's probably slower to test than just to do it.
   6700     __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
    6701     // Shift down 22 bits to get the most significant 10 bits of the low
   6702     // mantissa word.
   6703     __ shr(scratch2, Immediate(32 - shift_distance));
   6704     __ or_(scratch2, scratch);
   6705     // Move down according to the exponent.
   6706     __ shr_cl(scratch2);
   6707     // Now the unsigned answer is in scratch2.  We need to move it to rcx and
   6708     // we may need to fix the sign.
   6709     Label negative;
   6710     __ xor_(rcx, rcx);
   6711     __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
   6712     __ j(greater, &negative);
   6713     __ movl(rcx, scratch2);
   6714     __ jmp(&done);
   6715     __ bind(&negative);
   6716     __ subl(rcx, scratch2);
   6717     __ bind(&done);
   6718   }
   6719 }
   6720 
   6721 
   6722 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   6723   Label slow, done;
   6724 
   6725   if (op_ == Token::SUB) {
   6726     // Check whether the value is a smi.
   6727     Label try_float;
   6728     __ JumpIfNotSmi(rax, &try_float);
   6729 
   6730     // Enter runtime system if the value of the smi is zero
   6731     // to make sure that we switch between 0 and -0.
   6732     // Also enter it if the value of the smi is Smi::kMinValue.
   6733     __ SmiNeg(rax, rax, &done);
   6734 
   6735     // Either zero or Smi::kMinValue, neither of which become a smi when
   6736     // negated.
   6737     __ SmiCompare(rax, Smi::FromInt(0));
   6738     __ j(not_equal, &slow);
   6739     __ Move(rax, Factory::minus_zero_value());
   6740     __ jmp(&done);
   6741 
   6742     // Try floating point case.
   6743     __ bind(&try_float);
   6744     __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
   6745     __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
   6746     __ j(not_equal, &slow);
   6747     // Operand is a float, negate its value by flipping sign bit.
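             // IEEE doubles keep the sign in bit 63, so xor-ing the value with 1 << 63
             // negates it without touching exponent or mantissa (1.5 <-> -1.5, and
             // 0.0 <-> -0.0).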
   6748     __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
   6749     __ movq(kScratchRegister, Immediate(0x01));
   6750     __ shl(kScratchRegister, Immediate(63));
   6751     __ xor_(rdx, kScratchRegister);  // Flip sign.
   6752     // rdx is value to store.
   6753     if (overwrite_) {
   6754       __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
   6755     } else {
   6756       __ AllocateHeapNumber(rcx, rbx, &slow);
   6757       // rcx: allocated 'empty' number
   6758       __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
   6759       __ movq(rax, rcx);
   6760     }
   6761   } else if (op_ == Token::BIT_NOT) {
   6762     // Check if the operand is a heap number.
   6763     __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
   6764     __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
   6765     __ j(not_equal, &slow);
   6766 
   6767     // Convert the heap number in rax to an untagged integer in rcx.
   6768     IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
   6769 
   6770     // Do the bitwise operation and check if the result fits in a smi.
   6771     Label try_float;
   6772     __ not_(rcx);
   6773     // Tag the result as a smi and we're done.
   6774     ASSERT(kSmiTagSize == 1);
   6775     __ Integer32ToSmi(rax, rcx);
   6776   }
   6777 
   6778   // Return from the stub.
   6779   __ bind(&done);
   6780   __ StubReturn(1);
   6781 
   6782   // Handle the slow case by jumping to the JavaScript builtin.
   6783   __ bind(&slow);
   6784   __ pop(rcx);  // pop return address
   6785   __ push(rax);
   6786   __ push(rcx);  // push return address
   6787   switch (op_) {
   6788     case Token::SUB:
   6789       __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
   6790       break;
   6791     case Token::BIT_NOT:
   6792       __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
   6793       break;
   6794     default:
   6795       UNREACHABLE();
   6796   }
   6797 }
   6798 
   6799 
   6800 void RegExpExecStub::Generate(MacroAssembler* masm) {
   6801   // Just jump directly to runtime if native RegExp is not selected at compile
    6802   // time or if the regexp entry in generated code has been turned off by a
    6803   // runtime flag.
   6804 #ifndef V8_NATIVE_REGEXP
   6805   __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
   6806 #else  // V8_NATIVE_REGEXP
   6807   if (!FLAG_regexp_entry_native) {
   6808     __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
   6809     return;
   6810   }
   6811 
   6812   // Stack frame on entry.
    6813   //  rsp[0]: return address
    6814   //  rsp[8]: last_match_info (expected JSArray)
    6815   //  rsp[16]: previous index
    6816   //  rsp[24]: subject string
    6817   //  rsp[32]: JSRegExp object
   6818 
   6819   static const int kLastMatchInfoOffset = 1 * kPointerSize;
   6820   static const int kPreviousIndexOffset = 2 * kPointerSize;
   6821   static const int kSubjectOffset = 3 * kPointerSize;
   6822   static const int kJSRegExpOffset = 4 * kPointerSize;
   6823 
   6824   Label runtime;
   6825 
   6826   // Ensure that a RegExp stack is allocated.
   6827   ExternalReference address_of_regexp_stack_memory_address =
   6828       ExternalReference::address_of_regexp_stack_memory_address();
   6829   ExternalReference address_of_regexp_stack_memory_size =
   6830       ExternalReference::address_of_regexp_stack_memory_size();
   6831   __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
   6832   __ movq(kScratchRegister, Operand(kScratchRegister, 0));
   6833   __ testq(kScratchRegister, kScratchRegister);
   6834   __ j(zero, &runtime);
   6835 
   6836 
   6837   // Check that the first argument is a JSRegExp object.
   6838   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   6839   __ JumpIfSmi(rax, &runtime);
   6840   __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
   6841   __ j(not_equal, &runtime);
   6842   // Check that the RegExp has been compiled (data contains a fixed array).
   6843   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
   6844   if (FLAG_debug_code) {
   6845     Condition is_smi = masm->CheckSmi(rcx);
   6846     __ Check(NegateCondition(is_smi),
   6847         "Unexpected type for RegExp data, FixedArray expected");
   6848     __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
   6849     __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
   6850   }
   6851 
   6852   // rcx: RegExp data (FixedArray)
   6853   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   6854   __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
   6855   __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
   6856   __ j(not_equal, &runtime);
   6857 
   6858   // rcx: RegExp data (FixedArray)
    6859   // Check that the number of captures fits in the static offsets vector buffer.
   6860   __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   6861   // Calculate number of capture registers (number_of_captures + 1) * 2.
   6862   __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
   6863   __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
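           // For example, a regexp with 3 capturing groups needs (3 + 1) * 2 = 8
           // registers: a start and an end offset for the whole match and for each
           // group.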
   6864   // Check that the static offsets vector buffer is large enough.
   6865   __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
   6866   __ j(above, &runtime);
   6867 
   6868   // rcx: RegExp data (FixedArray)
   6869   // rdx: Number of capture registers
   6870   // Check that the second argument is a string.
   6871   __ movq(rax, Operand(rsp, kSubjectOffset));
   6872   __ JumpIfSmi(rax, &runtime);
   6873   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   6874   __ j(NegateCondition(is_string), &runtime);
   6875   // Get the length of the string to rbx.
   6876   __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
   6877 
   6878   // rbx: Length of subject string
   6879   // rcx: RegExp data (FixedArray)
   6880   // rdx: Number of capture registers
   6881   // Check that the third argument is a positive smi less than the string
    6882   // length. A negative value will be greater (unsigned comparison).
   6883   __ movq(rax, Operand(rsp, kPreviousIndexOffset));
   6884   __ SmiToInteger32(rax, rax);
   6885   __ cmpl(rax, rbx);
   6886   __ j(above, &runtime);
   6887 
   6888   // rcx: RegExp data (FixedArray)
   6889   // rdx: Number of capture registers
   6890   // Check that the fourth object is a JSArray object.
   6891   __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
   6892   __ JumpIfSmi(rax, &runtime);
   6893   __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
   6894   __ j(not_equal, &runtime);
   6895   // Check that the JSArray is in fast case.
   6896   __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
   6897   __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
   6898   __ Cmp(rax, Factory::fixed_array_map());
   6899   __ j(not_equal, &runtime);
   6900   // Check that the last match info has space for the capture registers and the
   6901   // additional information. Ensure no overflow in add.
   6902   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
   6903   __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   6904   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   6905   __ cmpl(rdx, rax);
   6906   __ j(greater, &runtime);
   6907 
    6908   // rcx: RegExp data (FixedArray)
   6909   // Check the representation and encoding of the subject string.
   6910   Label seq_string, seq_two_byte_string, check_code;
   6911   const int kStringRepresentationEncodingMask =
   6912       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   6913   __ movq(rax, Operand(rsp, kSubjectOffset));
   6914   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   6915   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   6916   __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
   6917   // First check for sequential string.
   6918   ASSERT_EQ(0, kStringTag);
   6919   ASSERT_EQ(0, kSeqStringTag);
   6920   __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
   6921   __ j(zero, &seq_string);
   6922 
   6923   // Check for flat cons string.
   6924   // A flat cons string is a cons string where the second part is the empty
   6925   // string. In that case the subject string is just the first part of the cons
   6926   // string. Also in this case the first part of the cons string is known to be
   6927   // a sequential string or an external string.
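           // Such strings typically arise from flattening a cons string: the
           // concatenated characters are copied into the first part and the second
           // part is replaced by the empty string, so the first part can be used
           // directly.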
   6928   __ movl(rdx, rbx);
   6929   __ andb(rdx, Immediate(kStringRepresentationMask));
   6930   __ cmpb(rdx, Immediate(kConsStringTag));
   6931   __ j(not_equal, &runtime);
   6932   __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
   6933   __ Cmp(rdx, Factory::empty_string());
   6934   __ j(not_equal, &runtime);
   6935   __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
   6936   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   6937   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   6938   ASSERT_EQ(0, kSeqStringTag);
   6939   __ testb(rbx, Immediate(kStringRepresentationMask));
   6940   __ j(not_zero, &runtime);
   6941   __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
   6942 
   6943   __ bind(&seq_string);
    6944   // rax: subject string (sequential, either ascii or two byte)
    6945   // rbx: subject string type & kStringRepresentationEncodingMask
   6946   // rcx: RegExp data (FixedArray)
   6947   // Check that the irregexp code has been generated for an ascii string. If
   6948   // it has, the field contains a code object otherwise it contains the hole.
   6949   __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kTwoByteStringTag));
   6950   __ j(equal, &seq_two_byte_string);
   6951   if (FLAG_debug_code) {
   6952     __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
   6953     __ Check(equal, "Expected sequential ascii string");
   6954   }
   6955   __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
   6956   __ Set(rdi, 1);  // Type is ascii.
   6957   __ jmp(&check_code);
   6958 
   6959   __ bind(&seq_two_byte_string);
   6960   // rax: subject string
   6961   // rcx: RegExp data (FixedArray)
   6962   __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
   6963   __ Set(rdi, 0);  // Type is two byte.
   6964 
   6965   __ bind(&check_code);
   6966   // Check that the irregexp code has been generated for the actual string
   6967   // encoding. If it has, the field contains a code object otherwise it contains
   6968   // the hole.
   6969   __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
   6970   __ j(not_equal, &runtime);
   6971 
   6972   // rax: subject string
   6973   // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
   6974   // r12: code
   6975   // Load used arguments before starting to push arguments for call to native
   6976   // RegExp code to avoid handling changing stack height.
   6977   __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
   6978   __ SmiToInteger64(rbx, rbx);  // Previous index from smi.
   6979 
   6980   // rax: subject string
   6981   // rbx: previous index
   6982   // rdi: encoding of subject string (1 if ascii 0 if two_byte);
   6983   // r12: code
   6984   // All checks done. Now push arguments for native regexp code.
   6985   __ IncrementCounter(&Counters::regexp_entry_native, 1);
   6986 
   6987   // rsi is caller save on Windows and used to pass parameter on Linux.
   6988   __ push(rsi);
   6989 
   6990   static const int kRegExpExecuteArguments = 7;
   6991   __ PrepareCallCFunction(kRegExpExecuteArguments);
   6992   int argument_slots_on_stack =
   6993       masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
   6994 
   6995   // Argument 7: Indicate that this is a direct call from JavaScript.
   6996   __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
   6997           Immediate(1));
   6998 
   6999   // Argument 6: Start (high end) of backtracking stack memory area.
   7000   __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
   7001   __ movq(r9, Operand(kScratchRegister, 0));
   7002   __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
   7003   __ addq(r9, Operand(kScratchRegister, 0));
   7004   // Argument 6 passed in r9 on Linux and on the stack on Windows.
   7005 #ifdef _WIN64
   7006   __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
   7007 #endif
   7008 
   7009   // Argument 5: static offsets vector buffer.
   7010   __ movq(r8, ExternalReference::address_of_static_offsets_vector());
   7011   // Argument 5 passed in r8 on Linux and on the stack on Windows.
   7012 #ifdef _WIN64
   7013   __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
   7014 #endif
   7015 
   7016   // First four arguments are passed in registers on both Linux and Windows.
   7017 #ifdef _WIN64
   7018   Register arg4 = r9;
   7019   Register arg3 = r8;
   7020   Register arg2 = rdx;
   7021   Register arg1 = rcx;
   7022 #else
   7023   Register arg4 = rcx;
   7024   Register arg3 = rdx;
   7025   Register arg2 = rsi;
   7026   Register arg1 = rdi;
   7027 #endif
   7028 
    7029   // Keep track of aliasing between argX defined above and the registers used.
   7030   // rax: subject string
   7031   // rbx: previous index
   7032   // rdi: encoding of subject string (1 if ascii 0 if two_byte);
   7033   // r12: code
   7034 
   7035   // Argument 4: End of string data
   7036   // Argument 3: Start of string data
   7037   Label setup_two_byte, setup_rest;
   7038   __ testb(rdi, rdi);
   7039   __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
   7040   __ j(zero, &setup_two_byte);
   7041   __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
   7042   __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
   7043   __ jmp(&setup_rest);
   7044   __ bind(&setup_two_byte);
   7045   __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
   7046   __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
   7047 
   7048   __ bind(&setup_rest);
   7049   // Argument 2: Previous index.
   7050   __ movq(arg2, rbx);
   7051 
   7052   // Argument 1: Subject string.
   7053   __ movq(arg1, rax);
   7054 
   7055   // Locate the code entry and call it.
   7056   __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
   7057   __ CallCFunction(r12, kRegExpExecuteArguments);
   7058 
   7059   // rsi is caller save, as it is used to pass parameter.
   7060   __ pop(rsi);
   7061 
   7062   // Check the result.
   7063   Label success;
   7064   __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
   7065   __ j(equal, &success);
   7066   Label failure;
   7067   __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
   7068   __ j(equal, &failure);
   7069   __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
   7070   // If not exception it can only be retry. Handle that in the runtime system.
   7071   __ j(not_equal, &runtime);
    7072   // Result must now be exception. If there is no pending exception already, a
    7073   // stack overflow (on the backtrack stack) was detected in RegExp code, but
    7074   // the exception has not been created yet. Handle that in the runtime system.
   7075   // TODO(592) Rerunning the RegExp to get the stack overflow exception.
   7076   ExternalReference pending_exception_address(Top::k_pending_exception_address);
   7077   __ movq(kScratchRegister, pending_exception_address);
   7078   __ Cmp(kScratchRegister, Factory::the_hole_value());
   7079   __ j(equal, &runtime);
   7080   __ bind(&failure);
   7081   // For failure and exception return null.
   7082   __ Move(rax, Factory::null_value());
   7083   __ ret(4 * kPointerSize);
   7084 
   7085   // Load RegExp data.
   7086   __ bind(&success);
   7087   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   7088   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
   7089   __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   7090   // Calculate number of capture registers (number_of_captures + 1) * 2.
   7091   __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
   7092   __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
   7093 
   7094   // rdx: Number of capture registers
   7095   // Load last_match_info which is still known to be a fast case JSArray.
   7096   __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
   7097   __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
   7098 
   7099   // rbx: last_match_info backing store (FixedArray)
   7100   // rdx: number of capture registers
   7101   // Store the capture count.
   7102   __ Integer32ToSmi(kScratchRegister, rdx);
   7103   __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
   7104           kScratchRegister);
   7105   // Store last subject and last input.
   7106   __ movq(rax, Operand(rsp, kSubjectOffset));
   7107   __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
   7108   __ movq(rcx, rbx);
   7109   __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
   7110   __ movq(rax, Operand(rsp, kSubjectOffset));
   7111   __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
   7112   __ movq(rcx, rbx);
   7113   __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
   7114 
   7115   // Get the static offsets vector filled by the native regexp code.
   7116   __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
   7117 
   7118   // rbx: last_match_info backing store (FixedArray)
   7119   // rcx: offsets vector
   7120   // rdx: number of capture registers
   7121   Label next_capture, done;
   7122   __ movq(rax, Operand(rsp, kPreviousIndexOffset));
   7123   // Capture register counter starts from number of capture registers and
    7124   // counts down until wrapping after zero.
   7125   __ bind(&next_capture);
   7126   __ subq(rdx, Immediate(1));
   7127   __ j(negative, &done);
   7128   // Read the value from the static offsets vector buffer and make it a smi.
   7129   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
   7130   __ Integer32ToSmi(rdi, rdi, &runtime);
   7131   // Add previous index (from its stack slot) if value is not negative.
   7132   Label capture_negative;
    7133   // Negative flag set by the smi conversion above.
   7134   __ j(negative, &capture_negative);
   7135   __ SmiAdd(rdi, rdi, rax, &runtime);  // Add previous index.
   7136   __ bind(&capture_negative);
   7137   // Store the smi value in the last match info.
   7138   __ movq(FieldOperand(rbx,
   7139                        rdx,
   7140                        times_pointer_size,
   7141                        RegExpImpl::kFirstCaptureOffset),
   7142                        rdi);
   7143   __ jmp(&next_capture);
   7144   __ bind(&done);
   7145 
   7146   // Return last match info.
   7147   __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
   7148   __ ret(4 * kPointerSize);
   7149 
   7150   // Do the runtime call to execute the regexp.
   7151   __ bind(&runtime);
   7152   __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
   7153 #endif  // V8_NATIVE_REGEXP
   7154 }
   7155 
   7156 
   7157 void CompareStub::Generate(MacroAssembler* masm) {
   7158   Label call_builtin, done;
   7159 
   7160   // NOTICE! This code is only reached after a smi-fast-case check, so
   7161   // it is certain that at least one operand isn't a smi.
   7162 
   7163   if (cc_ == equal) {  // Both strict and non-strict.
   7164     Label slow;  // Fallthrough label.
   7165     // Equality is almost reflexive (everything but NaN), so start by testing
   7166     // for "identity and not NaN".
   7167     {
   7168       Label not_identical;
   7169       __ cmpq(rax, rdx);
   7170       __ j(not_equal, &not_identical);
   7171       // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
   7172       // so we do the second best thing - test it ourselves.
   7173 
   7174       if (never_nan_nan_) {
   7175         __ xor_(rax, rax);
   7176         __ ret(0);
   7177       } else {
   7178         Label return_equal;
   7179         Label heap_number;
   7180         // If it's not a heap number, then return equal.
   7181         __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
   7182                Factory::heap_number_map());
   7183         __ j(equal, &heap_number);
   7184         __ bind(&return_equal);
   7185         __ xor_(rax, rax);
   7186         __ ret(0);
   7187 
   7188         __ bind(&heap_number);
   7189         // It is a heap number, so return non-equal if it's NaN and equal if
   7190         // it's not NaN.
   7191         // The representation of NaN values has all exponent bits (52..62) set,
   7192         // and not all mantissa bits (0..51) clear.
   7193         // We only allow QNaNs, which have bit 51 set (which also rules out
   7194         // the value being Infinity).
   7195 
   7196         // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
   7197         // all bits in the mask are set. We only need to check the word
   7198         // that contains the exponent and high bit of the mantissa.
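                 // Doubling rdx below shifts the sign bit out, so the 11 exponent bits
                 // and mantissa bit 51 end up as the 12 most significant bits; the
                 // unsigned comparison then tests "exponent all ones and mantissa bit
                 // 51 set", i.e. the quiet NaN pattern, regardless of sign.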
   7199         ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
   7200         __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
   7201         __ xorl(rax, rax);
   7202         __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
   7203         __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
   7204         __ setcc(above_equal, rax);
   7205         __ ret(0);
   7206       }
   7207 
   7208       __ bind(&not_identical);
   7209     }
   7210 
   7211     // If we're doing a strict equality comparison, we don't have to do
   7212     // type conversion, so we generate code to do fast comparison for objects
   7213     // and oddballs. Non-smi numbers and strings still go through the usual
   7214     // slow-case code.
   7215     if (strict_) {
   7216       // If either is a Smi (we know that not both are), then they can only
   7217       // be equal if the other is a HeapNumber. If so, use the slow case.
   7218       {
   7219         Label not_smis;
   7220         __ SelectNonSmi(rbx, rax, rdx, &not_smis);
   7221 
   7222         // Check if the non-smi operand is a heap number.
   7223         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
   7224                Factory::heap_number_map());
   7225         // If heap number, handle it in the slow case.
   7226         __ j(equal, &slow);
   7227         // Return non-equal.  ebx (the lower half of rbx) is not zero.
   7228         __ movq(rax, rbx);
   7229         __ ret(0);
   7230 
   7231         __ bind(&not_smis);
   7232       }
   7233 
   7234       // If either operand is a JSObject or an oddball value, then they are not
   7235       // equal since their pointers are different
   7236       // There is no test for undetectability in strict equality.
   7237 
   7238       // If the first object is a JS object, we have done pointer comparison.
   7239       ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   7240       Label first_non_object;
   7241       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
   7242       __ j(below, &first_non_object);
   7243       // Return non-zero (eax (not rax) is not zero)
   7244       Label return_not_equal;
   7245       ASSERT(kHeapObjectTag != 0);
   7246       __ bind(&return_not_equal);
   7247       __ ret(0);
   7248 
   7249       __ bind(&first_non_object);
   7250       // Check for oddballs: true, false, null, undefined.
   7251       __ CmpInstanceType(rcx, ODDBALL_TYPE);
   7252       __ j(equal, &return_not_equal);
   7253 
   7254       __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
   7255       __ j(above_equal, &return_not_equal);
   7256 
   7257       // Check for oddballs: true, false, null, undefined.
   7258       __ CmpInstanceType(rcx, ODDBALL_TYPE);
   7259       __ j(equal, &return_not_equal);
   7260 
   7261       // Fall through to the general case.
   7262     }
   7263     __ bind(&slow);
   7264   }
   7265 
   7266   // Push arguments below the return address to prepare jump to builtin.
   7267   __ pop(rcx);
   7268   __ push(rax);
   7269   __ push(rdx);
   7270   __ push(rcx);
   7271 
   7272   // Inlined floating point compare.
   7273   // Call builtin if operands are not floating point or smi.
   7274   Label check_for_symbols;
   7275   // Push arguments on stack, for helper functions.
   7276   FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
   7277   FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
   7278   __ FCmp();
   7279 
   7280   // Jump to builtin for NaN.
   7281   __ j(parity_even, &call_builtin);
   7282 
   7283   // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
   7284   Label below_lbl, above_lbl;
   7285   // use rdx, rax to convert unsigned to signed comparison
   7286   __ j(below, &below_lbl);
   7287   __ j(above, &above_lbl);
   7288 
   7289   __ xor_(rax, rax);  // equal
   7290   __ ret(2 * kPointerSize);
   7291 
   7292   __ bind(&below_lbl);
   7293   __ movq(rax, Immediate(-1));
   7294   __ ret(2 * kPointerSize);
   7295 
   7296   __ bind(&above_lbl);
   7297   __ movq(rax, Immediate(1));
   7298   __ ret(2 * kPointerSize);  // rax, rdx were pushed
   7299 
   7300   // Fast negative check for symbol-to-symbol equality.
   7301   __ bind(&check_for_symbols);
   7302   Label check_for_strings;
   7303   if (cc_ == equal) {
   7304     BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
   7305     BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
   7306 
   7307     // We've already checked for object identity, so if both operands
   7308     // are symbols they aren't equal. Register eax (not rax) already holds a
   7309     // non-zero value, which indicates not equal, so just return.
   7310     __ ret(2 * kPointerSize);
   7311   }
   7312 
   7313   __ bind(&check_for_strings);
   7314 
   7315   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
   7316 
   7317   // Inline comparison of ascii strings.
   7318   StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
   7319                                                      rdx,
   7320                                                      rax,
   7321                                                      rcx,
   7322                                                      rbx,
   7323                                                      rdi,
   7324                                                      r8);
   7325 
   7326 #ifdef DEBUG
   7327   __ Abort("Unexpected fall-through from string comparison");
   7328 #endif
   7329 
   7330   __ bind(&call_builtin);
   7331   // must swap argument order
   7332   __ pop(rcx);
   7333   __ pop(rdx);
   7334   __ pop(rax);
   7335   __ push(rdx);
   7336   __ push(rax);
   7337 
   7338   // Figure out which native to call and setup the arguments.
   7339   Builtins::JavaScript builtin;
   7340   if (cc_ == equal) {
   7341     builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   7342   } else {
   7343     builtin = Builtins::COMPARE;
   7344     int ncr;  // NaN compare result
   7345     if (cc_ == less || cc_ == less_equal) {
   7346       ncr = GREATER;
   7347     } else {
   7348       ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
   7349       ncr = LESS;
   7350     }
   7351     __ Push(Smi::FromInt(ncr));
   7352   }
   7353 
   7354   // Restore return address on the stack.
   7355   __ push(rcx);
   7356 
   7357   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   7358   // tagged as a small integer.
   7359   __ InvokeBuiltin(builtin, JUMP_FUNCTION);
   7360 }
   7361 
   7362 
   7363 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
   7364                                     Label* label,
   7365                                     Register object,
   7366                                     Register scratch) {
   7367   __ JumpIfSmi(object, label);
   7368   __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
   7369   __ movzxbq(scratch,
   7370              FieldOperand(scratch, Map::kInstanceTypeOffset));
   7371   // Ensure that no non-strings have the symbol bit set.
   7372   ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
   7373   ASSERT(kSymbolTag != 0);
   7374   __ testb(scratch, Immediate(kIsSymbolMask));
   7375   __ j(zero, label);
   7376 }
   7377 
   7378 
   7379 // Call the function just below TOS on the stack with the given
   7380 // arguments. The receiver is the TOS.
   7381 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
   7382                                       CallFunctionFlags flags,
   7383                                       int position) {
   7384   // Push the arguments ("left-to-right") on the stack.
   7385   int arg_count = args->length();
   7386   for (int i = 0; i < arg_count; i++) {
   7387     Load(args->at(i));
   7388   }
   7389 
   7390   // Record the position for debugging purposes.
   7391   CodeForSourcePosition(position);
   7392 
   7393   // Use the shared code stub to call the function.
   7394   InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
   7395   CallFunctionStub call_function(arg_count, in_loop, flags);
   7396   Result answer = frame_->CallStub(&call_function, arg_count + 1);
   7397   // Restore context and replace function on the stack with the
   7398   // result of the stub invocation.
   7399   frame_->RestoreContextRegister();
   7400   frame_->SetElementAt(0, &answer);
   7401 }
   7402 
   7403 
   7404 void InstanceofStub::Generate(MacroAssembler* masm) {
   7405   // Implements "value instanceof function" operator.
   7406   // Expected input state:
   7407   //   rsp[0] : return address
   7408   //   rsp[1] : function pointer
   7409   //   rsp[2] : value
   7410 
   7411   // Get the object - go slow case if it's a smi.
   7412   Label slow;
   7413   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   7414   __ JumpIfSmi(rax, &slow);
   7415 
   7416   // Check that the left hand is a JS object. Leave its map in rax.
   7417   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
   7418   __ j(below, &slow);
   7419   __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
   7420   __ j(above, &slow);
   7421 
   7422   // Get the prototype of the function.
   7423   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
   7424   __ TryGetFunctionPrototype(rdx, rbx, &slow);
   7425 
   7426   // Check that the function prototype is a JS object.
   7427   __ JumpIfSmi(rbx, &slow);
   7428   __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
   7429   __ j(below, &slow);
   7430   __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
   7431   __ j(above, &slow);
   7432 
   7433   // Register mapping: rax is object map and rbx is function prototype.
   7434   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
   7435 
   7436   // Loop through the prototype chain looking for the function prototype.
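           // Conceptually the loop performs:
           //   for (p = value_map->prototype(); p != null; p = p->map()->prototype()) {
           //     if (p == function_prototype) return 0;  // 0 means "is an instance".
           //   }
           //   return 1;                                 // 1 means "not an instance".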
   7437   Label loop, is_instance, is_not_instance;
   7438   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
   7439   __ bind(&loop);
   7440   __ cmpq(rcx, rbx);
   7441   __ j(equal, &is_instance);
   7442   __ cmpq(rcx, kScratchRegister);
   7443   __ j(equal, &is_not_instance);
   7444   __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
   7445   __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
   7446   __ jmp(&loop);
   7447 
   7448   __ bind(&is_instance);
   7449   __ xorl(rax, rax);
   7450   __ ret(2 * kPointerSize);
   7451 
   7452   __ bind(&is_not_instance);
   7453   __ movl(rax, Immediate(1));
   7454   __ ret(2 * kPointerSize);
   7455 
   7456   // Slow-case: Go through the JavaScript implementation.
   7457   __ bind(&slow);
   7458   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   7459 }
   7460 
   7461 
   7462 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   7463   // rsp[0] : return address
   7464   // rsp[8] : number of parameters
   7465   // rsp[16] : receiver displacement
   7466   // rsp[24] : function
   7467 
   7468   // The displacement is used for skipping the return address and the
   7469   // frame pointer on the stack. It is the offset of the last
   7470   // parameter (if any) relative to the frame pointer.
   7471   static const int kDisplacement = 2 * kPointerSize;
   7472 
   7473   // Check if the calling frame is an arguments adaptor frame.
   7474   Label adaptor_frame, try_allocate, runtime;
   7475   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   7476   __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
   7477                 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   7478   __ j(equal, &adaptor_frame);
   7479 
   7480   // Get the length from the frame.
   7481   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
   7482   __ jmp(&try_allocate);
   7483 
   7484   // Patch the arguments.length and the parameters pointer.
   7485   __ bind(&adaptor_frame);
   7486   __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   7487   __ movq(Operand(rsp, 1 * kPointerSize), rcx);
   7488   // Do not clobber the length index for the indexing operation since
    7489   // it is used to compute the size for allocation later.
   7490   SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
   7491   __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
   7492   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
   7493 
   7494   // Try the new space allocation. Start out with computing the size of
   7495   // the arguments object and the elements array.
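           // When there are arguments, the allocation is Heap::kArgumentsObjectSize
           // for the JSObject itself plus FixedArray::kHeaderSize plus
           // argument_count * kPointerSize for the elements array; with no arguments
           // only the JSObject is allocated.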
   7496   Label add_arguments_object;
   7497   __ bind(&try_allocate);
   7498   __ testq(rcx, rcx);
   7499   __ j(zero, &add_arguments_object);
   7500   index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
   7501   __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
   7502   __ bind(&add_arguments_object);
   7503   __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
   7504 
   7505   // Do the allocation of both objects in one go.
   7506   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
   7507 
   7508   // Get the arguments boilerplate from the current (global) context.
   7509   int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
   7510   __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   7511   __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
   7512   __ movq(rdi, Operand(rdi, offset));
   7513 
   7514   // Copy the JS object part.
   7515   for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
   7516     __ movq(kScratchRegister, FieldOperand(rdi, i));
   7517     __ movq(FieldOperand(rax, i), kScratchRegister);
   7518   }
   7519 
   7520   // Setup the callee in-object property.
   7521   ASSERT(Heap::arguments_callee_index == 0);
   7522   __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
   7523   __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
   7524 
   7525   // Get the length (smi tagged) and set that as an in-object property too.
   7526   ASSERT(Heap::arguments_length_index == 1);
   7527   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
   7528   __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
   7529 
   7530   // If there are no actual arguments, we're done.
   7531   Label done;
   7532   __ testq(rcx, rcx);
   7533   __ j(zero, &done);
   7534 
   7535   // Get the parameters pointer from the stack and untag the length.
   7536   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
   7537   __ SmiToInteger32(rcx, rcx);
   7538 
   7539   // Setup the elements pointer in the allocated arguments object and
   7540   // initialize the header in the elements fixed array.
   7541   __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
   7542   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
   7543   __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
   7544   __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
   7545   __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
   7546 
   7547   // Copy the fixed array slots.
   7548   Label loop;
   7549   __ bind(&loop);
   7550   __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
   7551   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   7552   __ addq(rdi, Immediate(kPointerSize));
   7553   __ subq(rdx, Immediate(kPointerSize));
   7554   __ decq(rcx);
   7555   __ j(not_zero, &loop);
   7556 
   7557   // Return and remove the on-stack parameters.
   7558   __ bind(&done);
   7559   __ ret(3 * kPointerSize);
   7560 
   7561   // Do the runtime call to allocate the arguments object.
   7562   __ bind(&runtime);
   7563   __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
   7564 }
   7565 
   7566 
   7567 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   7568   // The key is in rdx and the parameter count is in rax.
   7569 
   7570   // The displacement is used for skipping the frame pointer on the
   7571   // stack. It is the offset of the last parameter (if any) relative
   7572   // to the frame pointer.
   7573   static const int kDisplacement = 1 * kPointerSize;
   7574 
   7575   // Check that the key is a smi.
   7576   Label slow;
   7577   __ JumpIfNotSmi(rdx, &slow);
   7578 
   7579   // Check if the calling frame is an arguments adaptor frame.
   7580   Label adaptor;
   7581   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   7582   __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
   7583                 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   7584   __ j(equal, &adaptor);
   7585 
   7586   // Check index against formal parameters count limit passed in
   7587   // through register rax. Use unsigned comparison to get negative
   7588   // check for free.
   7589   __ cmpq(rdx, rax);
   7590   __ j(above_equal, &slow);
   7591 
   7592   // Read the argument from the stack and return it.
   7593   SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
   7594   __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
   7595   index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
   7596   __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
   7597   __ Ret();
   7598 
   7599   // Arguments adaptor case: Check index against actual arguments
   7600   // limit found in the arguments adaptor frame. Use unsigned
   7601   // comparison to get negative check for free.
   7602   __ bind(&adaptor);
   7603   __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   7604   __ cmpq(rdx, rcx);
   7605   __ j(above_equal, &slow);
   7606 
   7607   // Read the argument from the stack and return it.
   7608   index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
   7609   __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
   7610   index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
   7611   __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
   7612   __ Ret();
   7613 
   7614   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   7615   // by calling the runtime system.
   7616   __ bind(&slow);
   7617   __ pop(rbx);  // Return address.
   7618   __ push(rdx);
   7619   __ push(rbx);
   7620   Runtime::Function* f =
   7621       Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
   7622   __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
   7623 }
   7624 
   7625 
   7626 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
   7627   // Check if the calling frame is an arguments adaptor frame.
   7628   Label adaptor;
   7629   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   7630   __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
   7631                 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   7632 
   7633   // Arguments adaptor case: Read the arguments length from the
   7634   // adaptor frame and return it.
   7635   // Otherwise nothing to do: The number of formal parameters has already been
    7636   // passed in register rax by the calling function. Just return it.
   7637   __ cmovq(equal, rax,
   7638            Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   7639   __ ret(0);
   7640 }
   7641 
   7642 
    7643   // Check that the stack contains the next handler, frame pointer, state and
   7644   // Check that stack should contain next handler, frame pointer, state and
   7645   // return address in that order.
   7646   ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
   7647             StackHandlerConstants::kStateOffset);
   7648   ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
   7649             StackHandlerConstants::kPCOffset);
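           // The handler fields are laid out from lower to higher addresses as next
           // handler, frame pointer, state and return address, so after pointing rsp
           // at the handler each pop below consumes one field and the final ret uses
           // the saved return address.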
   7650 
   7651   ExternalReference handler_address(Top::k_handler_address);
   7652   __ movq(kScratchRegister, handler_address);
   7653   __ movq(rsp, Operand(kScratchRegister, 0));
   7654   // get next in chain
   7655   __ pop(rcx);
   7656   __ movq(Operand(kScratchRegister, 0), rcx);
   7657   __ pop(rbp);  // pop frame pointer
   7658   __ pop(rdx);  // remove state
   7659 
   7660   // Before returning we restore the context from the frame pointer if not NULL.
   7661   // The frame pointer is NULL in the exception handler of a JS entry frame.
   7662   __ xor_(rsi, rsi);  // tentatively set context pointer to NULL
   7663   Label skip;
   7664   __ cmpq(rbp, Immediate(0));
   7665   __ j(equal, &skip);
   7666   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   7667   __ bind(&skip);
   7668   __ ret(0);
   7669 }
   7670 
   7671 
   7672 void CEntryStub::GenerateCore(MacroAssembler* masm,
   7673                               Label* throw_normal_exception,
   7674                               Label* throw_termination_exception,
   7675                               Label* throw_out_of_memory_exception,
   7676                               bool do_gc,
   7677                               bool always_allocate_scope) {
   7678   // rax: result parameter for PerformGC, if any.
   7679   // rbx: pointer to C function  (C callee-saved).
   7680   // rbp: frame pointer  (restored after C call).
   7681   // rsp: stack pointer  (restored after C call).
   7682   // r14: number of arguments including receiver (C callee-saved).
   7683   // r15: pointer to the first argument (C callee-saved).
   7684   //      This pointer is reused in LeaveExitFrame(), so it is stored in a
   7685   //      callee-saved register.
   7686 
   7687   // Simple results returned in rax (both AMD64 and Win64 calling conventions).
   7688   // Complex results must be written to address passed as first argument.
   7689   // AMD64 calling convention: a struct of two pointers in rax+rdx
   7690 
   7691   if (do_gc) {
   7692     // Pass failure code returned from last attempt as first argument to GC.
   7693 #ifdef _WIN64
   7694     __ movq(rcx, rax);
   7695 #else  // ! defined(_WIN64)
   7696     __ movq(rdi, rax);
   7697 #endif
   7698     __ movq(kScratchRegister,
   7699             FUNCTION_ADDR(Runtime::PerformGC),
   7700             RelocInfo::RUNTIME_ENTRY);
   7701     __ call(kScratchRegister);
   7702   }
   7703 
   7704   ExternalReference scope_depth =
   7705       ExternalReference::heap_always_allocate_scope_depth();
   7706   if (always_allocate_scope) {
   7707     __ movq(kScratchRegister, scope_depth);
   7708     __ incl(Operand(kScratchRegister, 0));
   7709   }
   7710 
   7711   // Call C function.
   7712 #ifdef _WIN64
   7713   // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
   7714   // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
   7715   __ movq(Operand(rsp, 4 * kPointerSize), r14);  // argc.
   7716   __ movq(Operand(rsp, 5 * kPointerSize), r15);  // argv.
   7717   if (result_size_ < 2) {
   7718     // Pass a pointer to the Arguments object as the first argument.
   7719     // Return result in single register (rax).
   7720     __ lea(rcx, Operand(rsp, 4 * kPointerSize));
   7721   } else {
   7722     ASSERT_EQ(2, result_size_);
   7723     // Pass a pointer to the result location as the first argument.
   7724     __ lea(rcx, Operand(rsp, 6 * kPointerSize));
   7725     // Pass a pointer to the Arguments object as the second argument.
   7726     __ lea(rdx, Operand(rsp, 4 * kPointerSize));
   7727   }
   7728 
   7729 #else  // ! defined(_WIN64)
   7730   // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
   7731   __ movq(rdi, r14);  // argc.
   7732   __ movq(rsi, r15);  // argv.
   7733 #endif
   7734   __ call(rbx);
   7735   // Result is in rax - do not destroy this register!
   7736 
   7737   if (always_allocate_scope) {
   7738     __ movq(kScratchRegister, scope_depth);
   7739     __ decl(Operand(kScratchRegister, 0));
   7740   }
   7741 
   7742   // Check for failure result.
   7743   Label failure_returned;
   7744   ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
   7745 #ifdef _WIN64
   7746   // If return value is on the stack, pop it to registers.
   7747   if (result_size_ > 1) {
   7748     ASSERT_EQ(2, result_size_);
   7749     // Read result values stored on stack. Result is stored
   7750     // above the four argument mirror slots and the two
   7751     // Arguments object slots.
   7752     __ movq(rax, Operand(rsp, 6 * kPointerSize));
   7753     __ movq(rdx, Operand(rsp, 7 * kPointerSize));
   7754   }
   7755 #endif
   7756   __ lea(rcx, Operand(rax, 1));
   7757   // Lower 2 bits of rcx are 0 iff rax has failure tag.
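           // Failure objects have both low bits set (the ASSERT above checks that
           // kFailureTag + 1 has them clear), so rax + 1 has zero low bits exactly
           // when rax holds a failure.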
   7758   __ testl(rcx, Immediate(kFailureTagMask));
   7759   __ j(zero, &failure_returned);
   7760 
   7761   // Exit the JavaScript to C++ exit frame.
   7762   __ LeaveExitFrame(mode_, result_size_);
   7763   __ ret(0);
   7764 
   7765   // Handling of failure.
   7766   __ bind(&failure_returned);
   7767 
   7768   Label retry;
    7769   // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
   7770   ASSERT(Failure::RETRY_AFTER_GC == 0);
   7771   __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
   7772   __ j(zero, &retry);
   7773 
   7774   // Special handling of out of memory exceptions.
   7775   __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
   7776   __ cmpq(rax, kScratchRegister);
   7777   __ j(equal, throw_out_of_memory_exception);
   7778 
   7779   // Retrieve the pending exception and clear the variable.
   7780   ExternalReference pending_exception_address(Top::k_pending_exception_address);
   7781   __ movq(kScratchRegister, pending_exception_address);
   7782   __ movq(rax, Operand(kScratchRegister, 0));
   7783   __ movq(rdx, ExternalReference::the_hole_value_location());
   7784   __ movq(rdx, Operand(rdx, 0));
   7785   __ movq(Operand(kScratchRegister, 0), rdx);
   7786 
   7787   // Special handling of termination exceptions which are uncatchable
   7788   // by javascript code.
   7789   __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
   7790   __ j(equal, throw_termination_exception);
   7791 
   7792   // Handle normal exception.
   7793   __ jmp(throw_normal_exception);
   7794 
   7795   // Retry.
   7796   __ bind(&retry);
   7797 }
   7798 
   7799 
   7800 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
   7801                                           UncatchableExceptionType type) {
   7802   // Fetch top stack handler.
   7803   ExternalReference handler_address(Top::k_handler_address);
   7804   __ movq(kScratchRegister, handler_address);
   7805   __ movq(rsp, Operand(kScratchRegister, 0));
   7806 
   7807   // Unwind the handlers until the ENTRY handler is found.
   7808   Label loop, done;
   7809   __ bind(&loop);
   7810   // Load the type of the current stack handler.
   7811   const int kStateOffset = StackHandlerConstants::kStateOffset;
   7812   __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
   7813   __ j(equal, &done);
   7814   // Fetch the next handler in the list.
   7815   const int kNextOffset = StackHandlerConstants::kNextOffset;
   7816   __ movq(rsp, Operand(rsp, kNextOffset));
   7817   __ jmp(&loop);
   7818   __ bind(&done);
   7819 
   7820   // Set the top handler address to next handler past the current ENTRY handler.
   7821   __ movq(kScratchRegister, handler_address);
   7822   __ pop(Operand(kScratchRegister, 0));
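          // The next-handler field is the first word of a handler record (the
          // ASSERTs on the field order below are consistent with this), so this
          // single pop both rewrites Top::k_handler_address to point at the next
          // handler and advances rsp past that field.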
   7823 
   7824   if (type == OUT_OF_MEMORY) {
   7825     // Set external caught exception to false.
   7826     ExternalReference external_caught(Top::k_external_caught_exception_address);
   7827     __ movq(rax, Immediate(false));
   7828     __ store_rax(external_caught);
   7829 
   7830     // Set pending exception and rax to out of memory exception.
   7831     ExternalReference pending_exception(Top::k_pending_exception_address);
   7832     __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
   7833     __ store_rax(pending_exception);
   7834   }
   7835 
   7836   // Clear the context pointer.
   7837   __ xor_(rsi, rsi);
   7838 
   7839   // Restore registers from handler.
   7840   ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
   7841             StackHandlerConstants::kFPOffset);
   7842   __ pop(rbp);  // FP
   7843   ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
   7844             StackHandlerConstants::kStateOffset);
   7845   __ pop(rdx);  // State
   7846 
   7847   ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
   7848             StackHandlerConstants::kPCOffset);
   7849   __ ret(0);
   7850 }
   7851 
   7852 
   7853 void CallFunctionStub::Generate(MacroAssembler* masm) {
   7854   Label slow;
   7855 
   7856   // If the receiver might be a value (string, number or boolean) check for this
   7857   // and box it if it is.
   7858   if (ReceiverMightBeValue()) {
   7859     // Get the receiver from the stack.
   7860     // +1 ~ return address
   7861     Label receiver_is_value, receiver_is_js_object;
   7862     __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
   7863 
   7864     // Check if receiver is a smi (which is a number value).
   7865     __ JumpIfSmi(rax, &receiver_is_value);
   7866 
   7867     // Check if the receiver is a valid JS object.
   7868     __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
   7869     __ j(above_equal, &receiver_is_js_object);
   7870 
   7871     // Call the runtime to box the value.
   7872     __ bind(&receiver_is_value);
   7873     __ EnterInternalFrame();
   7874     __ push(rax);
   7875     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   7876     __ LeaveInternalFrame();
   7877     __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
   7878 
   7879     __ bind(&receiver_is_js_object);
   7880   }
   7881 
   7882   // Get the function to call from the stack.
   7883   // +2 ~ receiver, return address
   7884   __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
   7885 
   7886   // Check that the function really is a JavaScript function.
   7887   __ JumpIfSmi(rdi, &slow);
   7888   // Goto slow case if we do not have a function.
   7889   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   7890   __ j(not_equal, &slow);
   7891 
   7892   // Fast-case: Just invoke the function.
   7893   ParameterCount actual(argc_);
   7894   __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
   7895 
   7896   // Slow-case: Non-function called.
   7897   __ bind(&slow);
   7898   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   7899   // of the original receiver from the call site).
   7900   __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
   7901   __ Set(rax, argc_);
   7902   __ Set(rbx, 0);
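          // Note (assumption about the adaptor's register protocol): rax carries
          // the actual argument count and rbx the count expected by the callee;
          // zero matches the CALL_NON_FUNCTION builtin, so the arguments adaptor
          // below bridges the difference.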
   7903   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
   7904   Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
   7905   __ Jump(adaptor, RelocInfo::CODE_TARGET);
   7906 }
   7907 
   7908 
   7909 void CEntryStub::Generate(MacroAssembler* masm) {
   7910   // rax: number of arguments including receiver
   7911   // rbx: pointer to C function  (C callee-saved)
   7912   // rbp: frame pointer of calling JS frame (restored after C call)
   7913   // rsp: stack pointer  (restored after C call)
   7914   // rsi: current context (restored)
   7915 
   7916   // NOTE: Invocations of builtins may return failure objects
   7917   // instead of a proper result. The builtin entry handles
   7918   // this by performing a garbage collection and retrying the
   7919   // builtin once.
   7920 
   7921   // Enter the exit frame that transitions from JavaScript to C++.
   7922   __ EnterExitFrame(mode_, result_size_);
   7923 
   7924   // rax: Holds the context at this point, but should not be used.
   7925   //      On entry to code generated by GenerateCore, it must hold
   7926   //      a failure result if the collect_garbage argument to GenerateCore
   7927   //      is true.  This failure result can be the result of code
   7928   //      generated by a previous call to GenerateCore.  The value
   7929   //      of rax is then passed to Runtime::PerformGC.
   7930   // rbx: pointer to builtin function  (C callee-saved).
   7931   // rbp: frame pointer of exit frame  (restored after C call).
   7932   // rsp: stack pointer (restored after C call).
   7933   // r14: number of arguments including receiver (C callee-saved).
   7934   // r15: argv pointer (C callee-saved).
   7935 
   7936   Label throw_normal_exception;
   7937   Label throw_termination_exception;
   7938   Label throw_out_of_memory_exception;
   7939 
   7940   // Call into the runtime system.
   7941   GenerateCore(masm,
   7942                &throw_normal_exception,
   7943                &throw_termination_exception,
   7944                &throw_out_of_memory_exception,
   7945                false,
   7946                false);
   7947 
   7948   // Do space-specific GC and retry runtime call.
   7949   GenerateCore(masm,
   7950                &throw_normal_exception,
   7951                &throw_termination_exception,
   7952                &throw_out_of_memory_exception,
   7953                true,
   7954                false);
   7955 
   7956   // Do full GC and retry runtime call one final time.
   7957   Failure* failure = Failure::InternalError();
   7958   __ movq(rax, failure, RelocInfo::NONE);
   7959   GenerateCore(masm,
   7960                &throw_normal_exception,
   7961                &throw_termination_exception,
   7962                &throw_out_of_memory_exception,
   7963                true,
   7964                true);
   7965 
   7966   __ bind(&throw_out_of_memory_exception);
   7967   GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
   7968 
   7969   __ bind(&throw_termination_exception);
   7970   GenerateThrowUncatchable(masm, TERMINATION);
   7971 
   7972   __ bind(&throw_normal_exception);
   7973   GenerateThrowTOS(masm);
   7974 }
   7975 
   7976 
   7977 void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
   7978   UNREACHABLE();
   7979 }
   7980 
   7981 
   7982 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   7983   Label invoke, exit;
   7984 #ifdef ENABLE_LOGGING_AND_PROFILING
   7985   Label not_outermost_js, not_outermost_js_2;
   7986 #endif
   7987 
   7988   // Setup frame.
   7989   __ push(rbp);
   7990   __ movq(rbp, rsp);
   7991 
   7992   // Push the stack frame type marker twice.
   7993   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
   7994   __ Push(Smi::FromInt(marker));  // context slot
   7995   __ Push(Smi::FromInt(marker));  // function slot
   7996   // Save callee-saved registers (X64 calling conventions).
   7997   __ push(r12);
   7998   __ push(r13);
   7999   __ push(r14);
   8000   __ push(r15);
   8001   __ push(rdi);
   8002   __ push(rsi);
   8003   __ push(rbx);
   8004   // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
   8005   // callee-save in JS code as well.
   8006 
   8007   // Save copies of the top frame descriptor on the stack.
   8008   ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
   8009   __ load_rax(c_entry_fp);
   8010   __ push(rax);
   8011 
   8012 #ifdef ENABLE_LOGGING_AND_PROFILING
   8013   // If this is the outermost JS call, set js_entry_sp value.
   8014   ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
   8015   __ load_rax(js_entry_sp);
   8016   __ testq(rax, rax);
   8017   __ j(not_zero, &not_outermost_js);
   8018   __ movq(rax, rbp);
   8019   __ store_rax(js_entry_sp);
   8020   __ bind(&not_outermost_js);
   8021 #endif
   8022 
   8023   // Call a faked try-block that does the invoke.
   8024   __ call(&invoke);
   8025 
   8026   // Caught exception: Store result (exception) in the pending
   8027   // exception field in the JSEnv and return a failure sentinel.
   8028   ExternalReference pending_exception(Top::k_pending_exception_address);
   8029   __ store_rax(pending_exception);
   8030   __ movq(rax, Failure::Exception(), RelocInfo::NONE);
   8031   __ jmp(&exit);
   8032 
   8033   // Invoke: Link this frame into the handler chain.
   8034   __ bind(&invoke);
   8035   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
   8036 
   8037   // Clear any pending exceptions.
   8038   __ load_rax(ExternalReference::the_hole_value_location());
   8039   __ store_rax(pending_exception);
   8040 
   8041   // Fake a receiver (NULL).
   8042   __ push(Immediate(0));  // receiver
   8043 
   8044   // Invoke the function by calling through JS entry trampoline
   8045   // builtin and pop the faked function when we return. We load the address
   8046   // from an external reference instead of inlining the call target address
   8047   // directly in the code, because the builtin stubs may not have been
   8048   // generated yet at the time this code is generated.
   8049   if (is_construct) {
   8050     ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
   8051     __ load_rax(construct_entry);
   8052   } else {
   8053     ExternalReference entry(Builtins::JSEntryTrampoline);
   8054     __ load_rax(entry);
   8055   }
   8056   __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
   8057   __ call(kScratchRegister);
   8058 
   8059   // Unlink this frame from the handler chain.
   8060   __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
   8061   __ pop(Operand(kScratchRegister, 0));
   8062   // Pop next_sp.
   8063   __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
   8064 
   8065 #ifdef ENABLE_LOGGING_AND_PROFILING
   8066   // If current RBP value is the same as js_entry_sp value, it means that
   8067   // the current function is the outermost.
   8068   __ movq(kScratchRegister, js_entry_sp);
   8069   __ cmpq(rbp, Operand(kScratchRegister, 0));
   8070   __ j(not_equal, &not_outermost_js_2);
   8071   __ movq(Operand(kScratchRegister, 0), Immediate(0));
   8072   __ bind(&not_outermost_js_2);
   8073 #endif
   8074 
   8075   // Restore the top frame descriptor from the stack.
   8076   __ bind(&exit);
   8077   __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
   8078   __ pop(Operand(kScratchRegister, 0));
   8079 
   8080   // Restore callee-saved registers (X64 conventions).
   8081   __ pop(rbx);
   8082   __ pop(rsi);
   8083   __ pop(rdi);
   8084   __ pop(r15);
   8085   __ pop(r14);
   8086   __ pop(r13);
   8087   __ pop(r12);
   8088   __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
   8089 
   8090   // Restore frame pointer and return.
   8091   __ pop(rbp);
   8092   __ ret(0);
   8093 }
   8094 
   8095 
   8096 // -----------------------------------------------------------------------------
   8097 // Implementation of stubs.
   8098 
   8099 //  Stub classes have a public member named masm, not masm_.
   8100 
   8101 void StackCheckStub::Generate(MacroAssembler* masm) {
   8102   // Because builtins always remove the receiver from the stack, we
   8103   // have to fake one to avoid underflowing the stack. The receiver
   8104   // must be inserted below the return address on the stack so we
   8105   // temporarily store that in a register.
   8106   __ pop(rax);
   8107   __ Push(Smi::FromInt(0));
   8108   __ push(rax);
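          // The stack now holds, from the top: the return address and the fake
          // receiver (Smi 0), so the runtime call below can remove a receiver
          // without underflowing the stack.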
   8109 
   8110   // Do tail-call to runtime routine.
   8111   Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
   8112   __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
   8113 }
   8114 
   8115 
   8116 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
   8117                                            Register number) {
   8118   Label load_smi, done;
   8119 
   8120   __ JumpIfSmi(number, &load_smi);
   8121   __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
   8122   __ jmp(&done);
   8123 
   8124   __ bind(&load_smi);
   8125   __ SmiToInteger32(number, number);
   8126   __ push(number);
   8127   __ fild_s(Operand(rsp, 0));
   8128   __ pop(number);
   8129 
   8130   __ bind(&done);
   8131 }
   8132 
   8133 
   8134 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
   8135                                            Register src,
   8136                                            XMMRegister dst) {
   8137   Label load_smi, done;
   8138 
   8139   __ JumpIfSmi(src, &load_smi);
   8140   __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
   8141   __ jmp(&done);
   8142 
   8143   __ bind(&load_smi);
   8144   __ SmiToInteger32(src, src);
   8145   __ cvtlsi2sd(dst, src);
   8146 
   8147   __ bind(&done);
   8148 }
   8149 
   8150 
   8151 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
   8152                                             XMMRegister dst1,
   8153                                             XMMRegister dst2) {
   8154   __ movq(kScratchRegister, rdx);
   8155   LoadFloatOperand(masm, kScratchRegister, dst1);
   8156   __ movq(kScratchRegister, rax);
   8157   LoadFloatOperand(masm, kScratchRegister, dst2);
   8158 }
   8159 
   8160 
   8161 void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
   8162                                                     XMMRegister dst1,
   8163                                                     XMMRegister dst2) {
   8164   __ SmiToInteger32(kScratchRegister, rdx);
   8165   __ cvtlsi2sd(dst1, kScratchRegister);
   8166   __ SmiToInteger32(kScratchRegister, rax);
   8167   __ cvtlsi2sd(dst2, kScratchRegister);
   8168 }
   8169 
   8170 
   8171 // Input: rdx, rax are the left and right objects of a bit op.
   8172 // Output: rax, rcx are left and right integers for a bit op.
   8173 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
   8174                                          bool use_sse3,
   8175                                          Label* conversion_failure) {
   8176   // Check float operands.
   8177   Label arg1_is_object, check_undefined_arg1;
   8178   Label arg2_is_object, check_undefined_arg2;
   8179   Label load_arg2, done;
   8180 
   8181   __ JumpIfNotSmi(rdx, &arg1_is_object);
   8182   __ SmiToInteger32(rdx, rdx);
   8183   __ jmp(&load_arg2);
   8184 
   8185   // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
   8186   __ bind(&check_undefined_arg1);
   8187   __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
   8188   __ j(not_equal, conversion_failure);
   8189   __ movl(rdx, Immediate(0));
   8190   __ jmp(&load_arg2);
   8191 
   8192   __ bind(&arg1_is_object);
   8193   __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   8194   __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
   8195   __ j(not_equal, &check_undefined_arg1);
   8196   // Get the untagged integer version of the rdx heap number in rcx.
   8197   IntegerConvert(masm, rdx, use_sse3, conversion_failure);
   8198   __ movl(rdx, rcx);
   8199 
   8200   // Here rdx has the untagged integer, rax has a Smi or a heap number.
   8201   __ bind(&load_arg2);
   8202   // Test if arg2 is a Smi.
   8203   __ JumpIfNotSmi(rax, &arg2_is_object);
   8204   __ SmiToInteger32(rax, rax);
   8205   __ movl(rcx, rax);
   8206   __ jmp(&done);
   8207 
   8208   // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
   8209   __ bind(&check_undefined_arg2);
   8210   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   8211   __ j(not_equal, conversion_failure);
   8212   __ movl(rcx, Immediate(0));
   8213   __ jmp(&done);
   8214 
   8215   __ bind(&arg2_is_object);
   8216   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   8217   __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
   8218   __ j(not_equal, &check_undefined_arg2);
   8219   // Get the untagged integer version of the rax heap number in rcx.
   8220   IntegerConvert(masm, rax, use_sse3, conversion_failure);
   8221   __ bind(&done);
   8222   __ movl(rax, rdx);
   8223 }
   8224 
   8225 
   8226 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
   8227                                             Register lhs,
   8228                                             Register rhs) {
   8229   Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
   8230   __ JumpIfSmi(lhs, &load_smi_lhs);
   8231   __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
   8232   __ bind(&done_load_lhs);
   8233 
   8234   __ JumpIfSmi(rhs, &load_smi_rhs);
   8235   __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
   8236   __ jmp(&done);
   8237 
   8238   __ bind(&load_smi_lhs);
   8239   __ SmiToInteger64(kScratchRegister, lhs);
   8240   __ push(kScratchRegister);
   8241   __ fild_d(Operand(rsp, 0));
   8242   __ pop(kScratchRegister);
   8243   __ jmp(&done_load_lhs);
   8244 
   8245   __ bind(&load_smi_rhs);
   8246   __ SmiToInteger64(kScratchRegister, rhs);
   8247   __ push(kScratchRegister);
   8248   __ fild_d(Operand(rsp, 0));
   8249   __ pop(kScratchRegister);
   8250 
   8251   __ bind(&done);
   8252 }
   8253 
   8254 
   8255 void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
   8256                                               Label* non_float) {
   8257   Label test_other, done;
   8258   // Test if both operands are numbers (heap_numbers or smis).
   8259   // If not, jump to label non_float.
   8260   __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
   8261   __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
   8262   __ j(not_equal, non_float);  // The argument in rdx is not a number.
   8263 
   8264   __ bind(&test_other);
   8265   __ JumpIfSmi(rax, &done);  // argument in rax is OK
   8266   __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
   8267   __ j(not_equal, non_float);  // The argument in rax is not a number.
   8268 
   8269   // Fall-through: Both operands are numbers.
   8270   __ bind(&done);
   8271 }
   8272 
   8273 
   8274 const char* GenericBinaryOpStub::GetName() {
   8275   if (name_ != NULL) return name_;
   8276   const int len = 100;
   8277   name_ = Bootstrapper::AllocateAutoDeletedArray(len);
   8278   if (name_ == NULL) return "OOM";
   8279   const char* op_name = Token::Name(op_);
   8280   const char* overwrite_name;
   8281   switch (mode_) {
   8282     case NO_OVERWRITE: overwrite_name = "Alloc"; break;
   8283     case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
   8284     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
   8285     default: overwrite_name = "UnknownOverwrite"; break;
   8286   }
   8287 
   8288   OS::SNPrintF(Vector<char>(name_, len),
   8289                "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s",
   8290                op_name,
   8291                overwrite_name,
   8292                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
   8293                args_in_registers_ ? "RegArgs" : "StackArgs",
   8294                args_reversed_ ? "_R" : "",
   8295                use_sse3_ ? "SSE3" : "SSE2",
   8296                NumberInfo::ToString(operands_type_));
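          // The generated name looks roughly like
          // "GenericBinaryOpStub_ADD_Alloc_RegArgs_SSE2_...": the exact op and
          // number-type pieces depend on Token::Name() and NumberInfo::ToString(),
          // so this is only an illustrative example.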
   8297   return name_;
   8298 }
   8299 
   8300 
   8301 void GenericBinaryOpStub::GenerateCall(
   8302     MacroAssembler* masm,
   8303     Register left,
   8304     Register right) {
   8305   if (!ArgsInRegistersSupported()) {
   8306     // Pass arguments on the stack.
   8307     __ push(left);
   8308     __ push(right);
   8309   } else {
   8310     // The calling convention with registers is left in rdx and right in rax.
   8311     Register left_arg = rdx;
   8312     Register right_arg = rax;
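            // For example, if left arrived in rax and right in rdx, a commutative
            // operation simply records that the arguments are reversed, while a
            // non-commutative one exchanges the two registers.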
   8313     if (!(left.is(left_arg) && right.is(right_arg))) {
   8314       if (left.is(right_arg) && right.is(left_arg)) {
   8315         if (IsOperationCommutative()) {
   8316           SetArgsReversed();
   8317         } else {
   8318           __ xchg(left, right);
   8319         }
   8320       } else if (left.is(left_arg)) {
   8321         __ movq(right_arg, right);
   8322       } else if (right.is(right_arg)) {
   8323         __ movq(left_arg, left);
   8324       } else if (left.is(right_arg)) {
   8325         if (IsOperationCommutative()) {
   8326           __ movq(left_arg, right);
   8327           SetArgsReversed();
   8328         } else {
   8329           // Order of moves important to avoid destroying left argument.
   8330           __ movq(left_arg, left);
   8331           __ movq(right_arg, right);
   8332         }
   8333       } else if (right.is(left_arg)) {
   8334         if (IsOperationCommutative()) {
   8335           __ movq(right_arg, left);
   8336           SetArgsReversed();
   8337         } else {
   8338           // Order of moves important to avoid destroying right argument.
   8339           __ movq(right_arg, right);
   8340           __ movq(left_arg, left);
   8341         }
   8342       } else {
   8343         // Order of moves is not important.
   8344         __ movq(left_arg, left);
   8345         __ movq(right_arg, right);
   8346       }
   8347     }
   8348 
   8349     // Update flags to indicate that arguments are in registers.
   8350     SetArgsInRegisters();
   8351     __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   8352   }
   8353 
   8354   // Call the stub.
   8355   __ CallStub(this);
   8356 }
   8357 
   8358 
   8359 void GenericBinaryOpStub::GenerateCall(
   8360     MacroAssembler* masm,
   8361     Register left,
   8362     Smi* right) {
   8363   if (!ArgsInRegistersSupported()) {
   8364     // Pass arguments on the stack.
   8365     __ push(left);
   8366     __ Push(right);
   8367   } else {
   8368     // The calling convention with registers is left in rdx and right in rax.
   8369     Register left_arg = rdx;
   8370     Register right_arg = rax;
   8371     if (left.is(left_arg)) {
   8372       __ Move(right_arg, right);
   8373     } else if (left.is(right_arg) && IsOperationCommutative()) {
   8374       __ Move(left_arg, right);
   8375       SetArgsReversed();
   8376     } else {
   8377       // For non-commutative operations, left and right_arg might be
   8378       // the same register.  Therefore, the order of the moves is
   8379       // important here in order to not overwrite left before moving
   8380       // it to left_arg.
   8381       __ movq(left_arg, left);
   8382       __ Move(right_arg, right);
   8383     }
   8384 
   8385     // Update flags to indicate that arguments are in registers.
   8386     SetArgsInRegisters();
   8387     __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   8388   }
   8389 
   8390   // Call the stub.
   8391   __ CallStub(this);
   8392 }
   8393 
   8394 
   8395 void GenericBinaryOpStub::GenerateCall(
   8396     MacroAssembler* masm,
   8397     Smi* left,
   8398     Register right) {
   8399   if (!ArgsInRegistersSupported()) {
   8400     // Pass arguments on the stack.
   8401     __ Push(left);
   8402     __ push(right);
   8403   } else {
   8404     // The calling convention with registers is left in rdx and right in rax.
   8405     Register left_arg = rdx;
   8406     Register right_arg = rax;
   8407     if (right.is(right_arg)) {
   8408       __ Move(left_arg, left);
   8409     } else if (right.is(left_arg) && IsOperationCommutative()) {
   8410       __ Move(right_arg, left);
   8411       SetArgsReversed();
   8412     } else {
   8413       // For non-commutative operations, right and left_arg might be
   8414       // the same register.  Therefore, the order of the moves is
   8415       // important here in order to not overwrite right before moving
   8416       // it to right_arg.
   8417       __ movq(right_arg, right);
   8418       __ Move(left_arg, left);
   8419     }
   8420     // Update flags to indicate that arguments are in registers.
   8421     SetArgsInRegisters();
   8422     __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   8423   }
   8424 
   8425   // Call the stub.
   8426   __ CallStub(this);
   8427 }
   8428 
   8429 
   8430 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
   8431                                          VirtualFrame* frame,
   8432                                          Result* left,
   8433                                          Result* right) {
   8434   if (ArgsInRegistersSupported()) {
   8435     SetArgsInRegisters();
   8436     return frame->CallStub(this, left, right);
   8437   } else {
   8438     frame->Push(left);
   8439     frame->Push(right);
   8440     return frame->CallStub(this, 2);
   8441   }
   8442 }
   8443 
   8444 
   8445 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   8446   // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
   8447   // dividend in rax and rdx free for the division.  Use rax, rbx for those.
   8448   Comment load_comment(masm, "-- Load arguments");
   8449   Register left = rdx;
   8450   Register right = rax;
   8451   if (op_ == Token::DIV || op_ == Token::MOD) {
   8452     left = rax;
   8453     right = rbx;
   8454     if (HasArgsInRegisters()) {
   8455       __ movq(rbx, rax);
   8456       __ movq(rax, rdx);
   8457     }
   8458   }
   8459   if (!HasArgsInRegisters()) {
   8460     __ movq(right, Operand(rsp, 1 * kPointerSize));
   8461     __ movq(left, Operand(rsp, 2 * kPointerSize));
   8462   }
   8463 
   8464   // 2. Smi check both operands. Skip the check for OR as it is better combined
   8465   // with the actual operation.
   8466   Label not_smis;
   8467   if (op_ != Token::BIT_OR) {
   8468     Comment smi_check_comment(masm, "-- Smi check arguments");
   8469     __ JumpIfNotBothSmi(left, right, &not_smis);
   8470   }
   8471 
   8472   // 3. Operands are both smis (except for OR), perform the operation leaving
   8473   // the result in rax and check the result if necessary.
   8474   Comment perform_smi(masm, "-- Perform smi operation");
   8475   Label use_fp_on_smis;
   8476   switch (op_) {
   8477     case Token::ADD: {
   8478       ASSERT(right.is(rax));
   8479       __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
   8480       break;
   8481     }
   8482 
   8483     case Token::SUB: {
   8484       __ SmiSub(left, left, right, &use_fp_on_smis);
   8485       __ movq(rax, left);
   8486       break;
   8487     }
   8488 
   8489     case Token::MUL:
   8490       ASSERT(right.is(rax));
   8491       __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
   8492       break;
   8493 
   8494     case Token::DIV:
   8495       ASSERT(left.is(rax));
   8496       __ SmiDiv(left, left, right, &use_fp_on_smis);
   8497       break;
   8498 
   8499     case Token::MOD:
   8500       ASSERT(left.is(rax));
   8501       __ SmiMod(left, left, right, slow);
   8502       break;
   8503 
   8504     case Token::BIT_OR:
   8505       ASSERT(right.is(rax));
   8506       __ movq(rcx, right);  // Save the right operand.
   8507       __ SmiOr(right, right, left);  // BIT_OR is commutative.
   8508       __ testb(right, Immediate(kSmiTagMask));
   8509       __ j(not_zero, &not_smis);
   8510       break;
   8511 
   8512     case Token::BIT_AND:
   8513       ASSERT(right.is(rax));
   8514       __ SmiAnd(right, right, left);  // BIT_AND is commutative.
   8515       break;
   8516 
   8517     case Token::BIT_XOR:
   8518       ASSERT(right.is(rax));
   8519       __ SmiXor(right, right, left);  // BIT_XOR is commutative.
   8520       break;
   8521 
   8522     case Token::SHL:
   8523     case Token::SHR:
   8524     case Token::SAR:
   8525       switch (op_) {
   8526         case Token::SAR:
   8527           __ SmiShiftArithmeticRight(left, left, right);
   8528           break;
   8529         case Token::SHR:
   8530           __ SmiShiftLogicalRight(left, left, right, slow);
   8531           break;
   8532         case Token::SHL:
   8533           __ SmiShiftLeft(left, left, right, slow);
   8534           break;
   8535         default:
   8536           UNREACHABLE();
   8537       }
   8538       __ movq(rax, left);
   8539       break;
   8540 
   8541     default:
   8542       UNREACHABLE();
   8543       break;
   8544   }
   8545 
   8546   // 4. Emit return of result in rax.
   8547   GenerateReturn(masm);
   8548 
   8549   // 5. For some operations emit inline code to perform floating point
   8550   // operations on known smis (e.g., if the result of the operation
   8551   // overflowed the smi range).
   8552   switch (op_) {
   8553     case Token::ADD:
   8554     case Token::SUB:
   8555     case Token::MUL:
   8556     case Token::DIV: {
   8557       __ bind(&use_fp_on_smis);
   8558       if (op_ == Token::DIV) {
   8559         __ movq(rdx, rax);
   8560         __ movq(rax, rbx);
   8561       }
   8562       // left is rdx, right is rax.
   8563       __ AllocateHeapNumber(rbx, rcx, slow);
   8564       FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
   8565       switch (op_) {
   8566         case Token::ADD: __ addsd(xmm4, xmm5); break;
   8567         case Token::SUB: __ subsd(xmm4, xmm5); break;
   8568         case Token::MUL: __ mulsd(xmm4, xmm5); break;
   8569         case Token::DIV: __ divsd(xmm4, xmm5); break;
   8570         default: UNREACHABLE();
   8571       }
   8572       __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
   8573       __ movq(rax, rbx);
   8574       GenerateReturn(masm);
   8575     }
   8576     default:
   8577       break;
   8578   }
   8579 
   8580   // 6. Non-smi operands, fall out to the non-smi code with the operands in
   8581   // rdx and rax.
   8582   Comment done_comment(masm, "-- Enter non-smi code");
   8583   __ bind(&not_smis);
   8584 
   8585   switch (op_) {
   8586     case Token::DIV:
   8587     case Token::MOD:
   8588       // Operands are in rax, rbx at this point.
   8589       __ movq(rdx, rax);
   8590       __ movq(rax, rbx);
   8591       break;
   8592 
   8593     case Token::BIT_OR:
   8594       // Right operand is saved in rcx and rax was destroyed by the smi
   8595       // operation.
   8596       __ movq(rax, rcx);
   8597       break;
   8598 
   8599     default:
   8600       break;
   8601   }
   8602 }
   8603 
   8604 
   8605 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   8606   Label call_runtime;
   8607   if (HasSmiCodeInStub()) {
   8608     GenerateSmiCode(masm, &call_runtime);
   8609   } else if (op_ != Token::MOD) {
   8610     GenerateLoadArguments(masm);
   8611   }
   8612   // Floating point case.
   8613   switch (op_) {
   8614     case Token::ADD:
   8615     case Token::SUB:
   8616     case Token::MUL:
   8617     case Token::DIV: {
   8618       // rax: y
   8619       // rdx: x
   8620       if (NumberInfo::IsNumber(operands_type_)) {
   8621         if (FLAG_debug_code) {
   8622           // Assert at runtime that inputs are only numbers.
   8623           __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
   8624           __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
   8625         }
   8626       } else {
   8627         FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
   8628       }
   8629       // Fast-case: Both operands are numbers.
   8630       // xmm4 and xmm5 are volatile XMM registers.
   8631       FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
   8632 
   8633       switch (op_) {
   8634         case Token::ADD: __ addsd(xmm4, xmm5); break;
   8635         case Token::SUB: __ subsd(xmm4, xmm5); break;
   8636         case Token::MUL: __ mulsd(xmm4, xmm5); break;
   8637         case Token::DIV: __ divsd(xmm4, xmm5); break;
   8638         default: UNREACHABLE();
   8639       }
   8640       // Allocate a heap number, if needed.
   8641       Label skip_allocation;
   8642       OverwriteMode mode = mode_;
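              // If the arguments were reversed on entry, the operand that the
              // overwrite mode refers to now lives in the opposite register, so
              // swap the mode to keep the allocation logic below consistent.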
   8643       if (HasArgsReversed()) {
   8644         if (mode == OVERWRITE_RIGHT) {
   8645           mode = OVERWRITE_LEFT;
   8646         } else if (mode == OVERWRITE_LEFT) {
   8647           mode = OVERWRITE_RIGHT;
   8648         }
   8649       }
   8650       switch (mode) {
   8651         case OVERWRITE_LEFT:
   8652           __ JumpIfNotSmi(rdx, &skip_allocation);
   8653           __ AllocateHeapNumber(rbx, rcx, &call_runtime);
   8654           __ movq(rdx, rbx);
   8655           __ bind(&skip_allocation);
   8656           __ movq(rax, rdx);
   8657           break;
   8658         case OVERWRITE_RIGHT:
   8659           // If the argument in rax is already an object, we skip the
   8660           // allocation of a heap number.
   8661           __ JumpIfNotSmi(rax, &skip_allocation);
   8662           // Fall through!
   8663         case NO_OVERWRITE:
   8664           // Allocate a heap number for the result. Keep rax and rdx intact
   8665           // for the possible runtime call.
   8666           __ AllocateHeapNumber(rbx, rcx, &call_runtime);
   8667           __ movq(rax, rbx);
   8668           __ bind(&skip_allocation);
   8669           break;
   8670         default: UNREACHABLE();
   8671       }
   8672       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
   8673       GenerateReturn(masm);
   8674     }
   8675     case Token::MOD: {
   8676       // For MOD we go directly to runtime in the non-smi case.
   8677       break;
   8678     }
   8679     case Token::BIT_OR:
   8680     case Token::BIT_AND:
   8681     case Token::BIT_XOR:
   8682     case Token::SAR:
   8683     case Token::SHL:
   8684     case Token::SHR: {
   8685       Label skip_allocation, non_smi_result;
   8686       FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
   8687       switch (op_) {
   8688         case Token::BIT_OR:  __ orl(rax, rcx); break;
   8689         case Token::BIT_AND: __ andl(rax, rcx); break;
   8690         case Token::BIT_XOR: __ xorl(rax, rcx); break;
   8691         case Token::SAR: __ sarl_cl(rax); break;
   8692         case Token::SHL: __ shll_cl(rax); break;
   8693         case Token::SHR: __ shrl_cl(rax); break;
   8694         default: UNREACHABLE();
   8695       }
   8696       if (op_ == Token::SHR) {
   8697         // Check if result is non-negative. This can only happen for a shift
   8698         // by zero, which also doesn't update the sign flag.
   8699         __ testl(rax, rax);
   8700         __ j(negative, &non_smi_result);
   8701       }
   8702       __ JumpIfNotValidSmiValue(rax, &non_smi_result);
   8703       // Tag smi result, if possible, and return.
   8704       __ Integer32ToSmi(rax, rax);
   8705       GenerateReturn(masm);
   8706 
   8707       // All ops except SHR return a signed int32 that we load in a HeapNumber.
   8708       if (op_ != Token::SHR && non_smi_result.is_linked()) {
   8709         __ bind(&non_smi_result);
   8710         // Allocate a heap number if needed.
   8711         __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
   8712         switch (mode_) {
   8713           case OVERWRITE_LEFT:
   8714           case OVERWRITE_RIGHT:
   8715             // If the operand was an object, we skip the
   8716             // allocation of a heap number.
   8717             __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
   8718                                  1 * kPointerSize : 2 * kPointerSize));
   8719             __ JumpIfNotSmi(rax, &skip_allocation);
   8720             // Fall through!
   8721           case NO_OVERWRITE:
   8722             __ AllocateHeapNumber(rax, rcx, &call_runtime);
   8723             __ bind(&skip_allocation);
   8724             break;
   8725           default: UNREACHABLE();
   8726         }
   8727         // Store the result in the HeapNumber and return.
   8728         __ movq(Operand(rsp, 1 * kPointerSize), rbx);
   8729         __ fild_s(Operand(rsp, 1 * kPointerSize));
   8730         __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
   8731         GenerateReturn(masm);
   8732       }
   8733 
   8734       // SHR should return uint32 - go to runtime for non-smi/negative result.
   8735       if (op_ == Token::SHR) {
   8736         __ bind(&non_smi_result);
   8737       }
   8738       break;
   8739     }
   8740     default: UNREACHABLE(); break;
   8741   }
   8742 
   8743   // If all else fails, use the runtime system to get the correct
   8744   // result. If the arguments were passed in registers, place them back on the
   8745   // stack in the correct order below the return address.
   8746   __ bind(&call_runtime);
   8747   if (HasArgsInRegisters()) {
   8748     __ pop(rcx);
   8749     if (HasArgsReversed()) {
   8750       __ push(rax);
   8751       __ push(rdx);
   8752     } else {
   8753       __ push(rdx);
   8754       __ push(rax);
   8755     }
   8756     __ push(rcx);
   8757   }
   8758   switch (op_) {
   8759     case Token::ADD: {
   8760       // Test for string arguments before calling runtime.
   8761       Label not_strings, both_strings, not_string1, string1;
   8762       Condition is_smi;
   8763       Result answer;
   8764       is_smi = masm->CheckSmi(rdx);
   8765       __ j(is_smi, &not_string1);
   8766       __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
   8767       __ j(above_equal, &not_string1);
   8768 
   8769       // First argument is a string, test second.
   8770       is_smi = masm->CheckSmi(rax);
   8771       __ j(is_smi, &string1);
   8772       __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
   8773       __ j(above_equal, &string1);
   8774 
   8775       // First and second argument are strings.
   8776       StringAddStub stub(NO_STRING_CHECK_IN_STUB);
   8777       __ TailCallStub(&stub);
   8778 
   8779       // Only first argument is a string.
   8780       __ bind(&string1);
   8781       __ InvokeBuiltin(
   8782           HasArgsReversed() ?
   8783               Builtins::STRING_ADD_RIGHT :
   8784               Builtins::STRING_ADD_LEFT,
   8785           JUMP_FUNCTION);
   8786 
   8787       // First argument was not a string, test second.
   8788       __ bind(&not_string1);
   8789       is_smi = masm->CheckSmi(rax);
   8790       __ j(is_smi, &not_strings);
   8791       __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
   8792       __ j(above_equal, &not_strings);
   8793 
   8794       // Only second argument is a string.
   8795       __ InvokeBuiltin(
   8796           HasArgsReversed() ?
   8797               Builtins::STRING_ADD_LEFT :
   8798               Builtins::STRING_ADD_RIGHT,
   8799           JUMP_FUNCTION);
   8800 
   8801       __ bind(&not_strings);
   8802       // Neither argument is a string.
   8803       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
   8804       break;
   8805     }
   8806     case Token::SUB:
   8807       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
   8808       break;
   8809     case Token::MUL:
   8810       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
   8811       break;
   8812     case Token::DIV:
   8813       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
   8814       break;
   8815     case Token::MOD:
   8816       __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
   8817       break;
   8818     case Token::BIT_OR:
   8819       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
   8820       break;
   8821     case Token::BIT_AND:
   8822       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
   8823       break;
   8824     case Token::BIT_XOR:
   8825       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
   8826       break;
   8827     case Token::SAR:
   8828       __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
   8829       break;
   8830     case Token::SHL:
   8831       __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
   8832       break;
   8833     case Token::SHR:
   8834       __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
   8835       break;
   8836     default:
   8837       UNREACHABLE();
   8838   }
   8839 }
   8840 
   8841 
   8842 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
   8843   // If arguments are not passed in registers read them from the stack.
   8844   if (!HasArgsInRegisters()) {
   8845     __ movq(rax, Operand(rsp, 1 * kPointerSize));
   8846     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
   8847   }
   8848 }
   8849 
   8850 
   8851 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
   8852   // If arguments are not passed in registers remove them from the stack before
   8853   // returning.
   8854   if (!HasArgsInRegisters()) {
   8855     __ ret(2 * kPointerSize);  // Remove both operands
   8856   } else {
   8857     __ ret(0);
   8858   }
   8859 }
   8860 
   8861 
   8862 int CompareStub::MinorKey() {
   8863   // Encode the three parameters in a unique 16 bit value.
   8864   ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
   8865   int nnn_value = (never_nan_nan_ ? 2 : 0);
   8866   if (cc_ != equal) nnn_value = 0;  // Avoid duplicate stubs.
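          // Resulting layout, low to high bits: bit 0 = strict_, bit 1 =
          // never_nan_nan_ (kept only for equality), bits 2 and up = the
          // condition code.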
   8867   return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
   8868 }
   8869 
   8870 
   8871 const char* CompareStub::GetName() {
   8872   switch (cc_) {
   8873     case less: return "CompareStub_LT";
   8874     case greater: return "CompareStub_GT";
   8875     case less_equal: return "CompareStub_LE";
   8876     case greater_equal: return "CompareStub_GE";
   8877     case not_equal: {
   8878       if (strict_) {
   8879         if (never_nan_nan_) {
   8880           return "CompareStub_NE_STRICT_NO_NAN";
   8881         } else {
   8882           return "CompareStub_NE_STRICT";
   8883         }
   8884       } else {
   8885         if (never_nan_nan_) {
   8886           return "CompareStub_NE_NO_NAN";
   8887         } else {
   8888           return "CompareStub_NE";
   8889         }
   8890       }
   8891     }
   8892     case equal: {
   8893       if (strict_) {
   8894         if (never_nan_nan_) {
   8895           return "CompareStub_EQ_STRICT_NO_NAN";
   8896         } else {
   8897           return "CompareStub_EQ_STRICT";
   8898         }
   8899       } else {
   8900         if (never_nan_nan_) {
   8901           return "CompareStub_EQ_NO_NAN";
   8902         } else {
   8903           return "CompareStub_EQ";
   8904         }
   8905       }
   8906     }
   8907     default: return "CompareStub";
   8908   }
   8909 }
   8910 
   8911 
   8912 void StringAddStub::Generate(MacroAssembler* masm) {
   8913   Label string_add_runtime;
   8914 
   8915   // Load the two arguments.
   8916   __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument.
   8917   __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument.
   8918 
   8919   // Make sure that both arguments are strings if not known in advance.
   8920   if (string_check_) {
   8921     Condition is_smi;
   8922     is_smi = masm->CheckSmi(rax);
   8923     __ j(is_smi, &string_add_runtime);
   8924     __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
   8925     __ j(above_equal, &string_add_runtime);
   8926 
   8927     // First argument is a string, test second.
   8928     is_smi = masm->CheckSmi(rdx);
   8929     __ j(is_smi, &string_add_runtime);
   8930     __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
   8931     __ j(above_equal, &string_add_runtime);
   8932   }
   8933 
   8934   // Both arguments are strings.
   8935   // rax: first string
   8936   // rdx: second string
   8937   // Check if either of the strings is empty. In that case return the other.
   8938   Label second_not_zero_length, both_not_zero_length;
   8939   __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
   8940   __ testl(rcx, rcx);
   8941   __ j(not_zero, &second_not_zero_length);
   8942   // Second string is empty, result is first string which is already in rax.
   8943   __ IncrementCounter(&Counters::string_add_native, 1);
   8944   __ ret(2 * kPointerSize);
   8945   __ bind(&second_not_zero_length);
   8946   __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
   8947   __ testl(rbx, rbx);
   8948   __ j(not_zero, &both_not_zero_length);
   8949   // First string is empty, result is second string which is in rdx.
   8950   __ movq(rax, rdx);
   8951   __ IncrementCounter(&Counters::string_add_native, 1);
   8952   __ ret(2 * kPointerSize);
   8953 
   8954   // Both strings are non-empty.
   8955   // rax: first string
   8956   // rbx: length of first string
   8957   // rcx: length of second string
   8958   // rdx: second string
   8959   // r8: instance type of first string if string check was performed above
   8960   // r9: instance type of second string if string check was performed above
   8961   Label string_add_flat_result;
   8962   __ bind(&both_not_zero_length);
   8963   // Look at the length of the result of adding the two strings.
   8964   __ addl(rbx, rcx);
   8965   // Use the runtime system when adding two one-character strings, as it
   8966   // contains optimizations for this specific case using the symbol table.
   8967   __ cmpl(rbx, Immediate(2));
   8968   __ j(equal, &string_add_runtime);
   8969   // If the arguments were known to be strings, maps are not loaded to r8 and r9
   8970   // by the code above.
   8971   if (!string_check_) {
   8972     __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
   8973     __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
   8974   }
   8975   // Get the instance types of the two strings as they will be needed soon.
   8976   __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
   8977   __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
   8978   // Check if resulting string will be flat.
   8979   __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
   8980   __ j(below, &string_add_flat_result);
   8981   // Handle exceptionally long strings in the runtime system.
   8982   ASSERT((String::kMaxLength & 0x80000000) == 0);
   8983   __ cmpl(rbx, Immediate(String::kMaxLength));
   8984   __ j(above, &string_add_runtime);
   8985 
   8986   // If result is not supposed to be flat, allocate a cons string object. If
   8987   // both strings are ascii the result is an ascii cons string.
   8988   // rax: first string
   8989   // rbx: length of resulting flat string
   8990   // rdx: second string
   8991   // r8: instance type of first string
   8992   // r9: instance type of second string
   8993   Label non_ascii, allocated;
   8994   __ movl(rcx, r8);
   8995   __ and_(rcx, r9);
   8996   ASSERT(kStringEncodingMask == kAsciiStringTag);
   8997   __ testl(rcx, Immediate(kAsciiStringTag));
   8998   __ j(zero, &non_ascii);
   8999   // Allocate an ascii cons string.
   9000   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   9001   __ bind(&allocated);
   9002   // Fill the fields of the cons string.
   9003   __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
   9004   __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
   9005           Immediate(String::kEmptyHashField));
   9006   __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
   9007   __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
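          // Note that the characters themselves are not copied here: a cons string
          // only records its two halves and is flattened lazily if a flat
          // representation is needed later.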
   9008   __ movq(rax, rcx);
   9009   __ IncrementCounter(&Counters::string_add_native, 1);
   9010   __ ret(2 * kPointerSize);
   9011   __ bind(&non_ascii);
   9012   // Allocate a two byte cons string.
   9013   __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
   9014   __ jmp(&allocated);
   9015 
   9016   // Handle creating a flat result. First check that both strings are not
   9017   // external strings.
   9018   // rax: first string
   9019   // rbx: length of resulting flat string
   9020   // rdx: second string
   9021   // r8: instance type of first string
   9022   // r9: instance type of second string
   9023   __ bind(&string_add_flat_result);
   9024   __ movl(rcx, r8);
   9025   __ and_(rcx, Immediate(kStringRepresentationMask));
   9026   __ cmpl(rcx, Immediate(kExternalStringTag));
   9027   __ j(equal, &string_add_runtime);
   9028   __ movl(rcx, r9);
   9029   __ and_(rcx, Immediate(kStringRepresentationMask));
   9030   __ cmpl(rcx, Immediate(kExternalStringTag));
   9031   __ j(equal, &string_add_runtime);
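          // External strings keep their character data outside the heap, so the
          // sequential copy code below cannot read it directly; those cases are
          // left to the runtime.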
   9032   // Now check if both strings are ascii strings.
   9033   // rax: first string
   9034   // rbx: length of resulting flat string
   9035   // rdx: second string
   9036   // r8: instance type of first string
   9037   // r9: instance type of second string
   9038   Label non_ascii_string_add_flat_result;
   9039   ASSERT(kStringEncodingMask == kAsciiStringTag);
   9040   __ testl(r8, Immediate(kAsciiStringTag));
   9041   __ j(zero, &non_ascii_string_add_flat_result);
   9042   __ testl(r9, Immediate(kAsciiStringTag));
   9043   __ j(zero, &string_add_runtime);
   9044   // Both strings are ascii strings. As they are short they are both flat.
   9045   __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
   9046   // rcx: result string
   9047   __ movq(rbx, rcx);
   9048   // Locate first character of result.
   9049   __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   9050   // Locate first character of first argument
   9051   __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
   9052   __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   9053   // rax: first char of first argument
   9054   // rbx: result string
   9055   // rcx: first character of result
   9056   // rdx: second string
   9057   // rdi: length of first argument
   9058   GenerateCopyCharacters(masm, rcx, rax, rdi, true);
   9059   // Locate first character of second argument.
   9060   __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
   9061   __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   9062   // rbx: result string
   9063   // rcx: next character of result
   9064   // rdx: first char of second argument
   9065   // rdi: length of second argument
   9066   GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
   9067   __ movq(rax, rbx);
   9068   __ IncrementCounter(&Counters::string_add_native, 1);
   9069   __ ret(2 * kPointerSize);
   9070 
   9071   // Handle creating a flat two byte result.
   9072   // rax: first string - known to be two byte
   9073   // rbx: length of resulting flat string
   9074   // rdx: second string
   9075   // r8: instance type of first string
   9076   // r9: instance type of second string
   9077   __ bind(&non_ascii_string_add_flat_result);
   9078   __ and_(r9, Immediate(kAsciiStringTag));
   9079   __ j(not_zero, &string_add_runtime);
   9080   // Both strings are two byte strings. As they are short they are both
   9081   // flat.
   9082   __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
   9083   // rcx: result string
   9084   __ movq(rbx, rcx);
   9085   // Locate first character of result.
   9086   __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   9087   // Locate first character of first argument.
   9088   __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
   9089   __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   9090   // rax: first char of first argument
   9091   // rbx: result string
   9092   // rcx: first character of result
   9093   // rdx: second argument
   9094   // rdi: length of first argument
   9095   GenerateCopyCharacters(masm, rcx, rax, rdi, false);
   9096   // Locate first character of second argument.
   9097   __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
   9098   __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   9099   // rbx: result string
   9100   // rcx: next character of result
   9101   // rdx: first char of second argument
   9102   // rdi: length of second argument
   9103   GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
   9104   __ movq(rax, rbx);
   9105   __ IncrementCounter(&Counters::string_add_native, 1);
   9106   __ ret(2 * kPointerSize);
   9107 
   9108   // Just jump to runtime to add the two strings.
   9109   __ bind(&string_add_runtime);
   9110   __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
   9111 }
   9112 
   9113 
   9114 void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
   9115                                             Register dest,
   9116                                             Register src,
   9117                                             Register count,
   9118                                             bool ascii) {
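          // Note: the loop below tests the count at the bottom, so it always copies
          // at least one character; callers are expected to pass a nonzero count.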
   9119   Label loop;
   9120   __ bind(&loop);
   9121   // This loop just copies one character at a time, as it is only used for very
   9122   // short strings.
   9123   if (ascii) {
   9124     __ movb(kScratchRegister, Operand(src, 0));
   9125     __ movb(Operand(dest, 0), kScratchRegister);
   9126     __ addq(src, Immediate(1));
   9127     __ addq(dest, Immediate(1));
   9128   } else {
   9129     __ movzxwl(kScratchRegister, Operand(src, 0));
   9130     __ movw(Operand(dest, 0), kScratchRegister);
   9131     __ addq(src, Immediate(2));
   9132     __ addq(dest, Immediate(2));
   9133   }
   9134   __ subl(count, Immediate(1));
   9135   __ j(not_zero, &loop);
   9136 }
   9137 
   9138 
   9139 void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
   9140                                                Register dest,
   9141                                                Register src,
   9142                                                Register count,
   9143                                                bool ascii) {
   9144   // Copy characters using rep movs of quadwords (the character count is
   9145   // converted to a byte count first for two byte strings). Copy the
   9146   // remaining bytes one at a time after running rep movs.
   9147   ASSERT(dest.is(rdi));  // rep movs destination
   9148   ASSERT(src.is(rsi));  // rep movs source
   9149   ASSERT(count.is(rcx));  // rep movs count
   9150 
   9151   // Nothing to do for zero characters.
   9152   Label done;
   9153   __ testq(count, count);
   9154   __ j(zero, &done);
   9155 
   9156   // Make count the number of bytes to copy.
   9157   if (!ascii) {
   9158     ASSERT_EQ(2, sizeof(uc16));  // NOLINT
   9159     __ addq(count, count);
   9160   }
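          // Illustration (ascii case): for a count of 13 characters, one quadword
          // (8 bytes) is copied by rep movsq below and the remaining 5 bytes go
          // through the byte loop at the end. For two byte strings the count was
          // doubled above first.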
   9161 
   9162   // Don't enter the rep movs if there are fewer than 8 bytes to copy.
   9163   Label last_bytes;
   9164   __ testq(count, Immediate(~7));
   9165   __ j(zero, &last_bytes);
   9166 
   9167   // Copy from rsi to rdi using the rep movs instruction.
   9168   __ movq(kScratchRegister, count);
   9169   __ sar(count, Immediate(3));  // Number of quadwords to copy.
   9170   __ repmovsq();
   9171 
   9172   // Find number of bytes left.
   9173   __ movq(count, kScratchRegister);
   9174   __ and_(count, Immediate(7));
   9175 
   9176   // Check if there are more bytes to copy.
   9177   __ bind(&last_bytes);
   9178   __ testq(count, count);
   9179   __ j(zero, &done);
   9180 
   9181   // Copy remaining characters.
   9182   Label loop;
   9183   __ bind(&loop);
   9184   __ movb(kScratchRegister, Operand(src, 0));
   9185   __ movb(Operand(dest, 0), kScratchRegister);
   9186   __ addq(src, Immediate(1));
   9187   __ addq(dest, Immediate(1));
   9188   __ subq(count, Immediate(1));
   9189   __ j(not_zero, &loop);
   9190 
   9191   __ bind(&done);
   9192 }
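
        // Illustrative sketch (not generated code) of the strategy above: after
        // scaling the character count to a byte count, the bulk is copied eight
        // bytes at a time with rep movsq and the remaining 0..7 bytes are copied
        // individually. Helper names are illustrative.
        //
        //   size_t bytes = ascii ? count : count * sizeof(uc16);
        //   if (bytes == 0) return;
        //   size_t quads = bytes / 8, tail = bytes % 8;
        //   rep_movsq(&dest, &src, quads);     // advances dest/src; skipped if quads == 0
        //   while (tail-- > 0) *dest++ = *src++;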
   9193 
   9194 
   9195 void SubStringStub::Generate(MacroAssembler* masm) {
   9196   Label runtime;
   9197 
   9198   // Stack frame on entry.
   9199   //  rsp[0]: return address
   9200   //  rsp[8]: to
   9201   //  rsp[16]: from
   9202   //  rsp[24]: string
   9203 
   9204   const int kToOffset = 1 * kPointerSize;
   9205   const int kFromOffset = kToOffset + kPointerSize;
   9206   const int kStringOffset = kFromOffset + kPointerSize;
   9207   const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
   9208 
   9209   // Make sure first argument is a string.
   9210   __ movq(rax, Operand(rsp, kStringOffset));
   9211   ASSERT_EQ(0, kSmiTag);
   9212   __ testl(rax, Immediate(kSmiTagMask));
   9213   __ j(zero, &runtime);
   9214   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   9215   __ j(NegateCondition(is_string), &runtime);
   9216 
   9217   // rax: string
   9218   // rbx: instance type
   9219   // Calculate length of sub string using the smi values.
   9220   __ movq(rcx, Operand(rsp, kToOffset));
   9221   __ movq(rdx, Operand(rsp, kFromOffset));
   9222   __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
   9223 
   9224   __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
   9225   __ j(negative, &runtime);
   9226   // Handle sub-strings of length 2 or less in the runtime system.
   9227   __ SmiToInteger32(rcx, rcx);
   9228   __ cmpl(rcx, Immediate(2));
   9229   __ j(below_equal, &runtime);
   9230 
   9231   // rax: string
   9232   // rbx: instance type
   9233   // rcx: result string length
   9234   // Check for flat ascii string
   9235   Label non_ascii_flat;
   9236   __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
   9237   __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
   9238   __ j(not_equal, &non_ascii_flat);
   9239 
   9240   // Allocate the result.
   9241   __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
   9242 
   9243   // rax: result string
   9244   // rcx: result string length
   9245   __ movq(rdx, rsi);  // rsi used by following code.
   9246   // Locate first character of result.
   9247   __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
   9248   // Load string argument and locate character of sub string start.
   9249   __ movq(rsi, Operand(rsp, kStringOffset));
   9250   __ movq(rbx, Operand(rsp, kFromOffset));
   9251   {
   9252     SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
   9253     __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
   9254                         SeqAsciiString::kHeaderSize - kHeapObjectTag));
   9255   }
   9256 
   9257   // rax: result string
   9258   // rcx: result length
   9259   // rdx: original value of rsi
   9260   // rdi: first character of result
   9261   // rsi: character of sub string start
   9262   GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
   9263   __ movq(rsi, rdx);  // Restore rsi.
   9264   __ IncrementCounter(&Counters::sub_string_native, 1);
   9265   __ ret(kArgumentsSize);
   9266 
   9267   __ bind(&non_ascii_flat);
   9268   // rax: string
   9269   // rbx: instance type & (kStringRepresentationMask | kStringEncodingMask)
   9270   // rcx: result string length
   9271   // Check for sequential two byte string
   9272   __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
   9273   __ j(not_equal, &runtime);
   9274 
   9275   // Allocate the result.
   9276   __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
   9277 
   9278   // rax: result string
   9279   // rcx: result string length
   9280   __ movq(rdx, rsi);  // rsi used by following code.
   9281   // Locate first character of result.
   9282   __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
   9283   // Load string argument and locate character of sub string start.
   9284   __ movq(rsi, Operand(rsp, kStringOffset));
   9285   __ movq(rbx, Operand(rsp, kFromOffset));
   9286   {
   9287     SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
   9288     __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
   9289                         SeqAsciiString::kHeaderSize - kHeapObjectTag));
   9290   }
   9291 
   9292   // rax: result string
   9293   // rcx: result length
   9294   // rdx: original value of rsi
   9295   // rdi: first character of result
   9296   // rsi: character of sub string start
   9297   GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
   9298   __ movq(rsi, rdx);  // Restore rsi.
   9299   __ IncrementCounter(&Counters::sub_string_native, 1);
   9300   __ ret(kArgumentsSize);
   9301 
   9302   // Just jump to runtime to create the sub string.
   9303   __ bind(&runtime);
   9304   __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
   9305 }
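
        // Illustrative sketch (not generated code) of the fast path above: only
        // flat sequential strings with a result longer than two characters are
        // handled inline; everything else goes to Runtime::kSubString. Names
        // and helpers below are illustrative.
        //
        //   int length = to - from;            // both arguments are positive smis
        //   if (length <= 2 || !IsFlatSequential(string)) return RuntimeSubString();
        //   Result* result = AllocateSequentialString(length, EncodingOf(string));
        //   CopyCharactersREP(ResultChars(result), CharsOf(string) + from, length);
        //   return result;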
   9306 
   9307 
   9308 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
   9309                                                         Register left,
   9310                                                         Register right,
   9311                                                         Register scratch1,
   9312                                                         Register scratch2,
   9313                                                         Register scratch3,
   9314                                                         Register scratch4) {
   9315   // Ensure that you can always subtract a string length from a non-negative
   9316   // number (e.g. another length).
   9317   ASSERT(String::kMaxLength < 0x7fffffff);
   9318 
   9319   // Find minimum length and length difference.
   9320   __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
   9321   __ movl(scratch4, scratch1);
   9322   __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
   9323   // Register scratch4 now holds left.length - right.length.
   9324   const Register length_difference = scratch4;
   9325   Label left_shorter;
   9326   __ j(less, &left_shorter);
   9327   // The right string isn't longer than the left one.
   9328   // Get the right string's length by subtracting the (non-negative) difference
   9329   // from the left string's length.
   9330   __ subl(scratch1, length_difference);
   9331   __ bind(&left_shorter);
   9332   // Register scratch1 now holds Min(left.length, right.length).
   9333   const Register min_length = scratch1;
   9334 
   9335   Label compare_lengths;
   9336   // If min-length is zero, go directly to comparing lengths.
   9337   __ testl(min_length, min_length);
   9338   __ j(zero, &compare_lengths);
   9339 
   9340   // Registers scratch2 and scratch3 are free.
   9341   Label result_not_equal;
   9342   Label loop;
   9343   {
   9344     // Check characters 0 .. min_length - 1 in a loop.
   9345     // Use scratch3 as loop index, min_length as limit and scratch2
   9346     // for computation.
   9347     const Register index = scratch3;
   9348     __ movl(index, Immediate(0));  // Index into strings.
   9349     __ bind(&loop);
   9350     // Compare characters.
   9351     // TODO(lrn): Could we load more than one character at a time?
   9352     __ movb(scratch2, FieldOperand(left,
   9353                                    index,
   9354                                    times_1,
   9355                                    SeqAsciiString::kHeaderSize));
   9356     // Increment index and use -1 modifier on next load to give
   9357     // the previous load extra time to complete.
   9358     __ addl(index, Immediate(1));
   9359     __ cmpb(scratch2, FieldOperand(right,
   9360                                    index,
   9361                                    times_1,
   9362                                    SeqAsciiString::kHeaderSize - 1));
   9363     __ j(not_equal, &result_not_equal);
   9364     __ cmpl(index, min_length);
   9365     __ j(not_equal, &loop);
   9366   }
   9367   // Completed loop without finding different characters.
   9368   // Compare lengths (precomputed).
   9369   __ bind(&compare_lengths);
   9370   __ testl(length_difference, length_difference);
   9371   __ j(not_zero, &result_not_equal);
   9372 
   9373   // Result is EQUAL.
   9374   __ Move(rax, Smi::FromInt(EQUAL));
   9375   __ ret(2 * kPointerSize);
   9376 
   9377   Label result_greater;
   9378   __ bind(&result_not_equal);
   9379   // Unequal comparison of left to right, either character or length.
   9380   __ j(greater, &result_greater);
   9381 
   9382   // Result is LESS.
   9383   __ Move(rax, Smi::FromInt(LESS));
   9384   __ ret(2 * kPointerSize);
   9385 
   9386   // Result is GREATER.
   9387   __ bind(&result_greater);
   9388   __ Move(rax, Smi::FromInt(GREATER));
   9389   __ ret(2 * kPointerSize);
   9390 }
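
        // Illustrative sketch (not generated code) of the comparison above: the
        // first min(left.length, right.length) characters decide the result; if
        // they all match, the shorter string compares as smaller. Names are
        // illustrative.
        //
        //   int min_length = left_length < right_length ? left_length : right_length;
        //   for (int i = 0; i < min_length; i++) {
        //     if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
        //   }
        //   if (left_length == right_length) return EQUAL;
        //   return left_length < right_length ? LESS : GREATER;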
   9391 
   9392 
   9393 void StringCompareStub::Generate(MacroAssembler* masm) {
   9394   Label runtime;
   9395 
   9396   // Stack frame on entry.
   9397   //  rsp[0]: return address
   9398   //  rsp[8]: right string
   9399   //  rsp[16]: left string
   9400 
   9401   __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
   9402   __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
   9403 
   9404   // Check for identity.
   9405   Label not_same;
   9406   __ cmpq(rdx, rax);
   9407   __ j(not_equal, &not_same);
   9408   __ Move(rax, Smi::FromInt(EQUAL));
   9409   __ IncrementCounter(&Counters::string_compare_native, 1);
   9410   __ ret(2 * kPointerSize);
   9411 
   9412   __ bind(&not_same);
   9413 
   9414   // Check that both are sequential ASCII strings.
   9415   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
   9416 
   9417   // Inline comparison of ascii strings.
   9418   __ IncrementCounter(&Counters::string_compare_native, 1);
   9419   GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
   9420 
   9421   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   9422   // tagged as a small integer.
   9423   __ bind(&runtime);
   9424   __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
   9425 }
   9426 
   9427 #undef __
   9428 
   9429 #define __ masm.
   9430 
   9431 #ifdef _WIN64
   9432 typedef double (*ModuloFunction)(double, double);
   9433 // Define custom fmod implementation.
   9434 ModuloFunction CreateModuloFunction() {
   9435   size_t actual_size;
   9436   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
   9437                                                  &actual_size,
   9438                                                  true));
   9439   CHECK(buffer);
   9440   Assembler masm(buffer, static_cast<int>(actual_size));
   9441   // Generated code is put into a fixed, unmovable buffer, and not into
   9442   // the V8 heap. We can't, and don't, refer to any relocatable addresses
   9443   // (e.g. the JavaScript NaN object).
   9444 
   9445   // Windows 64 ABI passes double arguments in xmm0, xmm1 and
   9446   // returns result in xmm0.
   9447   // Argument backing space is allocated on the stack above
   9448   // the return address.
   9449 
   9450   // Compute x mod y.
   9451   // Load y and x (use argument backing store as temporary storage).
   9452   __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
   9453   __ movsd(Operand(rsp, kPointerSize), xmm0);
   9454   __ fld_d(Operand(rsp, kPointerSize * 2));
   9455   __ fld_d(Operand(rsp, kPointerSize));
   9456 
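          // x87 status word layout relied on below (loaded into ax by
          // fnstsw_ax): bit 0 is IE (invalid operation) and bit 2 is ZE (zero
          // divide), so the mask 5 tests both; bit 10 is C2 (incomplete fprem);
          // 0x3f covers all six exception flags.
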
   9457   // Clear exception flags before operation.
   9458   {
   9459     Label no_exceptions;
   9460     __ fwait();
   9461     __ fnstsw_ax();
   9462     // Clear if Invalid Operation or Zero Division exceptions are set.
   9463     __ testb(rax, Immediate(5));
   9464     __ j(zero, &no_exceptions);
   9465     __ fnclex();
   9466     __ bind(&no_exceptions);
   9467   }
   9468 
   9469   // Compute st(0) % st(1)
   9470   {
   9471     Label partial_remainder_loop;
   9472     __ bind(&partial_remainder_loop);
   9473     __ fprem();
   9474     __ fwait();
   9475     __ fnstsw_ax();
   9476     __ testl(rax, Immediate(0x400 /* C2 */));
   9477     // If C2 is set, the computation only has a partial result. Loop to
   9478     // continue the computation.
   9479     __ j(not_zero, &partial_remainder_loop);
   9480   }
   9481 
   9482   Label valid_result;
   9483   Label return_result;
   9484   // If Invalid Operation or Zero Division exceptions are set,
   9485   // return NaN.
   9486   __ testb(rax, Immediate(5));
   9487   __ j(zero, &valid_result);
   9488   __ fstp(0);  // Drop result in st(0).
   9489   int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
   9490   __ movq(rcx, kNaNValue, RelocInfo::NONE);
   9491   __ movq(Operand(rsp, kPointerSize), rcx);
   9492   __ movsd(xmm0, Operand(rsp, kPointerSize));
   9493   __ jmp(&return_result);
   9494 
   9495   // If result is valid, return that.
   9496   __ bind(&valid_result);
   9497   __ fstp_d(Operand(rsp, kPointerSize));
   9498   __ movsd(xmm0, Operand(rsp, kPointerSize));
   9499 
   9500   // Clean up FPU stack and exceptions and return xmm0
   9501   __ bind(&return_result);
   9502   __ fstp(0);  // Unload y.
   9503 
   9504   Label clear_exceptions;
   9505   __ testb(rax, Immediate(0x3f /* Any Exception*/));
   9506   __ j(not_zero, &clear_exceptions);
   9507   __ ret(0);
   9508   __ bind(&clear_exceptions);
   9509   __ fnclex();
   9510   __ ret(0);
   9511 
   9512   CodeDesc desc;
   9513   masm.GetCode(&desc);
   9514   // Call the function from C++.
   9515   return FUNCTION_CAST<ModuloFunction>(buffer);
   9516 }
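
        // Illustrative usage sketch (not part of this file's code): the pointer
        // returned above behaves like fmod from <math.h>, e.g.
        //
        //   ModuloFunction modulo = CreateModuloFunction();
        //   double r = modulo(5.5, 2.0);   // 1.5, matching fmod(5.5, 2.0)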
   9517 
   9518 #endif
   9519 
   9520 
   9521 #undef __
   9522 
   9523 } }  // namespace v8::internal
   9524