      1 // Copyright 2009 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include "v8.h"
     29 
     30 #include "bootstrapper.h"
     31 #include "codegen-inl.h"
     32 #include "assembler-x64.h"
     33 #include "macro-assembler-x64.h"
     34 #include "serialize.h"
     35 #include "debug.h"
     36 
     37 namespace v8 {
     38 namespace internal {
     39 
     40 MacroAssembler::MacroAssembler(void* buffer, int size)
     41     : Assembler(buffer, size),
     42       generating_stub_(false),
     43       allow_stub_calls_(true),
     44       code_object_(Heap::undefined_value()) {
     45 }
     46 
     47 
     48 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
     49   movq(destination, Operand(r13, index << kPointerSizeLog2));
     50 }
     51 
     52 
     53 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
     54   push(Operand(r13, index << kPointerSizeLog2));
     55 }
     56 
     57 
     58 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
     59   cmpq(with, Operand(r13, index << kPointerSizeLog2));
     60 }
     61 
     62 
     63 void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
     64   LoadRoot(kScratchRegister, index);
     65   cmpq(with, kScratchRegister);
     66 }
     67 
     68 
     69 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
     70   CompareRoot(rsp, Heap::kStackLimitRootIndex);
     71   j(below, on_stack_overflow);
     72 }
     73 
     74 
     75 static void RecordWriteHelper(MacroAssembler* masm,
     76                               Register object,
     77                               Register addr,
     78                               Register scratch) {
     79   Label fast;
     80 
     81   // Compute the page start address from the heap object pointer, and reuse
     82   // the 'object' register for it.
     83   ASSERT(is_int32(~Page::kPageAlignmentMask));
     84   masm->and_(object,
     85              Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
     86   Register page_start = object;
     87 
      88   // Compute the bit address in the remembered set, i.e. the index of the
      89   // pointer within the page. Reuse 'addr' as pointer_offset.
     90   masm->subq(addr, page_start);
     91   masm->shr(addr, Immediate(kPointerSizeLog2));
     92   Register pointer_offset = addr;
     93 
     94   // If the bit offset lies beyond the normal remembered set range, it is in
     95   // the extra remembered set area of a large object.
     96   masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
     97   masm->j(less, &fast);
     98 
     99   // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
    100   // extra remembered set after the large object.
    101 
    102   // Load the array length into 'scratch'.
    103   masm->movl(scratch,
    104              Operand(page_start,
    105                      Page::kObjectStartOffset + FixedArray::kLengthOffset));
    106   Register array_length = scratch;
    107 
    108   // Extra remembered set starts right after the large object (a FixedArray), at
    109   //   page_start + kObjectStartOffset + objectSize
    110   // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
    111   // Add the delta between the end of the normal RSet and the start of the
    112   // extra RSet to 'page_start', so that addressing the bit using
    113   // 'pointer_offset' hits the extra RSet words.
    114   masm->lea(page_start,
    115             Operand(page_start, array_length, times_pointer_size,
    116                     Page::kObjectStartOffset + FixedArray::kHeaderSize
    117                         - Page::kRSetEndOffset));
    118 
    119   // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
    120   // to limit code size. We should probably evaluate this decision by
    121   // measuring the performance of an equivalent implementation using
    122   // "simpler" instructions
    123   masm->bind(&fast);
    124   masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
    125 }
    126 
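// Editor's note (illustrative, not part of the original source): the mapping
// computed in RecordWriteHelper above can be checked with a small example.
// For a store into the slot at page_start + 0x40, with 8-byte pointers
// (kPointerSizeLog2 == 3), pointer_offset becomes 0x40 >> 3 == 8, so the bts
// sets bit 8 of the bitmap that starts at page_start + Page::kRSetOffset.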
    127 
    128 class RecordWriteStub : public CodeStub {
    129  public:
    130   RecordWriteStub(Register object, Register addr, Register scratch)
    131       : object_(object), addr_(addr), scratch_(scratch) { }
    132 
    133   void Generate(MacroAssembler* masm);
    134 
    135  private:
    136   Register object_;
    137   Register addr_;
    138   Register scratch_;
    139 
    140 #ifdef DEBUG
    141   void Print() {
    142     PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
    143            object_.code(), addr_.code(), scratch_.code());
    144   }
    145 #endif
    146 
    147   // Minor key encoding in 12 bits of three registers (object, address and
    148   // scratch) OOOOAAAASSSS.
    149   class ScratchBits : public BitField<uint32_t, 0, 4> {};
    150   class AddressBits : public BitField<uint32_t, 4, 4> {};
    151   class ObjectBits : public BitField<uint32_t, 8, 4> {};
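  // Illustrative example (added): with object == rdi (code 7), addr == rbx
  // (code 3) and scratch == rcx (code 1), MinorKey() below returns
  // (7 << 8) | (3 << 4) | 1 == 0x731, matching the OOOOAAAASSSS layout.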
    152 
    153   Major MajorKey() { return RecordWrite; }
    154 
    155   int MinorKey() {
    156     // Encode the registers.
    157     return ObjectBits::encode(object_.code()) |
    158            AddressBits::encode(addr_.code()) |
    159            ScratchBits::encode(scratch_.code());
    160   }
    161 };
    162 
    163 
    164 void RecordWriteStub::Generate(MacroAssembler* masm) {
    165   RecordWriteHelper(masm, object_, addr_, scratch_);
    166   masm->ret(0);
    167 }
    168 
    169 
    170 // Set the remembered set bit for [object+offset].
    171 // object is the object being stored into, value is the object being stored.
    172 // If offset is zero, then the smi_index register contains the array index into
    173 // the elements array represented as a smi. Otherwise it can be used as a
    174 // scratch register.
    175 // All registers are clobbered by the operation.
    176 void MacroAssembler::RecordWrite(Register object,
    177                                  int offset,
    178                                  Register value,
    179                                  Register smi_index) {
    180   // The compiled code assumes that record write doesn't change the
    181   // context register, so we check that none of the clobbered
    182   // registers are rsi.
    183   ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
    184 
    185   // First, check if a remembered set write is even needed. The tests below
    186   // catch stores of Smis and stores into young gen (which does not have space
     187   // for the remembered set bits).
    188   Label done;
    189   JumpIfSmi(value, &done);
    190 
    191   RecordWriteNonSmi(object, offset, value, smi_index);
    192   bind(&done);
    193 
    194   // Clobber all input registers when running with the debug-code flag
    195   // turned on to provoke errors. This clobbering repeats the
    196   // clobbering done inside RecordWriteNonSmi but it's necessary to
    197   // avoid having the fast case for smis leave the registers
    198   // unchanged.
    199   if (FLAG_debug_code) {
    200     movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
    201     movq(value, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
    202     movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
    203   }
    204 }
    205 
    206 
    207 void MacroAssembler::RecordWriteNonSmi(Register object,
    208                                        int offset,
    209                                        Register scratch,
    210                                        Register smi_index) {
    211   Label done;
    212 
    213   if (FLAG_debug_code) {
    214     Label okay;
    215     JumpIfNotSmi(object, &okay);
    216     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
    217     bind(&okay);
    218   }
    219 
    220   // Test that the object address is not in the new space.  We cannot
    221   // set remembered set bits in the new space.
    222   movq(scratch, object);
    223   ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
    224   and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
    225   movq(kScratchRegister, ExternalReference::new_space_start());
    226   cmpq(scratch, kScratchRegister);
    227   j(equal, &done);
    228 
    229   if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
     230     // Compute the bit offset in the remembered set, leave it in 'scratch'.
    231     lea(scratch, Operand(object, offset));
    232     ASSERT(is_int32(Page::kPageAlignmentMask));
    233     and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
    234     shr(scratch, Immediate(kObjectAlignmentBits));
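    // Worked example (added for clarity): if 'object' lies 0x100 bytes into
    // its page and offset == 0x18, 'scratch' now holds
    // (0x100 + 0x18) >> kObjectAlignmentBits == 0x23, the bit index of the
    // updated slot, assuming 8-byte object alignment (kObjectAlignmentBits == 3).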
    235 
    236     // Compute the page address from the heap object pointer, leave it in
    237     // 'object' (immediate value is sign extended).
    238     and_(object, Immediate(~Page::kPageAlignmentMask));
    239 
    240     // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
    241     // to limit code size. We should probably evaluate this decision by
    242     // measuring the performance of an equivalent implementation using
    243     // "simpler" instructions
    244     bts(Operand(object, Page::kRSetOffset), scratch);
    245   } else {
    246     Register dst = smi_index;
    247     if (offset != 0) {
    248       lea(dst, Operand(object, offset));
    249     } else {
    250       // array access: calculate the destination address in the same manner as
    251       // KeyedStoreIC::GenerateGeneric.
    252       SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
    253       lea(dst, Operand(object,
    254                        index.reg,
    255                        index.scale,
    256                        FixedArray::kHeaderSize - kHeapObjectTag));
    257     }
    258     // If we are already generating a shared stub, not inlining the
    259     // record write code isn't going to save us any memory.
    260     if (generating_stub()) {
    261       RecordWriteHelper(this, object, dst, scratch);
    262     } else {
    263       RecordWriteStub stub(object, dst, scratch);
    264       CallStub(&stub);
    265     }
    266   }
    267 
    268   bind(&done);
    269 
    270   // Clobber all input registers when running with the debug-code flag
    271   // turned on to provoke errors.
    272   if (FLAG_debug_code) {
    273     movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
    274     movq(scratch, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
    275     movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
    276   }
    277 }
    278 
    279 
    280 void MacroAssembler::Assert(Condition cc, const char* msg) {
    281   if (FLAG_debug_code) Check(cc, msg);
    282 }
    283 
    284 
    285 void MacroAssembler::Check(Condition cc, const char* msg) {
    286   Label L;
    287   j(cc, &L);
    288   Abort(msg);
    289   // will not return here
    290   bind(&L);
    291 }
    292 
    293 
    294 void MacroAssembler::NegativeZeroTest(Register result,
    295                                       Register op,
    296                                       Label* then_label) {
    297   Label ok;
    298   testl(result, result);
    299   j(not_zero, &ok);
    300   testl(op, op);
    301   j(sign, then_label);
    302   bind(&ok);
    303 }
    304 
    305 
    306 void MacroAssembler::Abort(const char* msg) {
    307   // We want to pass the msg string like a smi to avoid GC
    308   // problems, however msg is not guaranteed to be aligned
    309   // properly. Instead, we pass an aligned pointer that is
    310   // a proper v8 smi, but also pass the alignment difference
    311   // from the real pointer as a smi.
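  // For illustration (added): if msg is at address 0x100003, p0 below becomes
  // 0x100002 (low tag bit cleared, so it carries a smi tag) and the remaining
  // difference of 1 is passed separately as Smi::FromInt(1).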
    312   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
    313   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
    314   // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
    315   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
    316 #ifdef DEBUG
    317   if (msg != NULL) {
    318     RecordComment("Abort message: ");
    319     RecordComment(msg);
    320   }
    321 #endif
    322   // Disable stub call restrictions to always allow calls to abort.
    323   set_allow_stub_calls(true);
    324 
    325   push(rax);
    326   movq(kScratchRegister, p0, RelocInfo::NONE);
    327   push(kScratchRegister);
    328   movq(kScratchRegister,
    329        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
    330        RelocInfo::NONE);
    331   push(kScratchRegister);
    332   CallRuntime(Runtime::kAbort, 2);
    333   // will not return here
    334   int3();
    335 }
    336 
    337 
    338 void MacroAssembler::CallStub(CodeStub* stub) {
    339   ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
    340   Call(stub->GetCode(), RelocInfo::CODE_TARGET);
    341 }
    342 
    343 
    344 void MacroAssembler::TailCallStub(CodeStub* stub) {
    345   ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
    346   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
    347 }
    348 
    349 
    350 void MacroAssembler::StubReturn(int argc) {
    351   ASSERT(argc >= 1 && generating_stub());
    352   ret((argc - 1) * kPointerSize);
    353 }
    354 
    355 
    356 void MacroAssembler::IllegalOperation(int num_arguments) {
    357   if (num_arguments > 0) {
    358     addq(rsp, Immediate(num_arguments * kPointerSize));
    359   }
    360   LoadRoot(rax, Heap::kUndefinedValueRootIndex);
    361 }
    362 
    363 
    364 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
    365   CallRuntime(Runtime::FunctionForId(id), num_arguments);
    366 }
    367 
    368 
    369 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
    370   // If the expected number of arguments of the runtime function is
    371   // constant, we check that the actual number of arguments match the
    372   // expectation.
    373   if (f->nargs >= 0 && f->nargs != num_arguments) {
    374     IllegalOperation(num_arguments);
    375     return;
    376   }
    377 
    378   // TODO(1236192): Most runtime routines don't need the number of
    379   // arguments passed in because it is constant. At some point we
    380   // should remove this need and make the runtime routine entry code
    381   // smarter.
    382   movq(rax, Immediate(num_arguments));
    383   movq(rbx, ExternalReference(f));
    384   CEntryStub ces(f->result_size);
    385   CallStub(&ces);
    386 }
    387 
    388 
    389 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
    390                                            int num_arguments) {
    391   movq(rax, Immediate(num_arguments));
    392   movq(rbx, ext);
    393 
    394   CEntryStub stub(1);
    395   CallStub(&stub);
    396 }
    397 
    398 
    399 void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
    400                                      int num_arguments,
    401                                      int result_size) {
    402   // ----------- S t a t e -------------
    403   //  -- rsp[0] : return address
    404   //  -- rsp[8] : argument num_arguments - 1
    405   //  ...
    406   //  -- rsp[8 * num_arguments] : argument 0 (receiver)
    407   // -----------------------------------
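  // Concrete case (added): with num_arguments == 2, rsp[8] holds argument 1
  // and rsp[16] holds argument 0, the receiver.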
    408 
    409   // TODO(1236192): Most runtime routines don't need the number of
    410   // arguments passed in because it is constant. At some point we
    411   // should remove this need and make the runtime routine entry code
    412   // smarter.
    413   movq(rax, Immediate(num_arguments));
    414   JumpToRuntime(ext, result_size);
    415 }
    416 
    417 
    418 void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
    419                                    int result_size) {
    420   // Set the entry point and jump to the C entry runtime stub.
    421   movq(rbx, ext);
    422   CEntryStub ces(result_size);
    423   jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
    424 }
    425 
    426 
    427 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
    428   // Calls are not allowed in some stubs.
    429   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
    430 
    431   // Rely on the assertion to check that the number of provided
     432   // arguments matches the expected number of arguments. Fake a
    433   // parameter count to avoid emitting code to do the check.
    434   ParameterCount expected(0);
    435   GetBuiltinEntry(rdx, id);
    436   InvokeCode(rdx, expected, expected, flag);
    437 }
    438 
    439 
    440 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
    441   // Load the JavaScript builtin function from the builtins object.
    442   movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
    443   movq(rdi, FieldOperand(rdi, GlobalObject::kBuiltinsOffset));
    444   int builtins_offset =
    445       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
    446   movq(rdi, FieldOperand(rdi, builtins_offset));
    447   // Load the code entry point from the function into the target register.
    448   movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
    449   movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
    450   addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
    451 }
    452 
    453 
    454 void MacroAssembler::Set(Register dst, int64_t x) {
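  // Note (added): a 32-bit immediate given to movq is sign-extended to 64 bits,
  // while movl zero-extends, so the is_int32 and is_uint32 cases below together
  // cover every value representable in 32 bits without a full 64-bit move.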
    455   if (x == 0) {
    456     xor_(dst, dst);
    457   } else if (is_int32(x)) {
    458     movq(dst, Immediate(static_cast<int32_t>(x)));
    459   } else if (is_uint32(x)) {
    460     movl(dst, Immediate(static_cast<uint32_t>(x)));
    461   } else {
    462     movq(dst, x, RelocInfo::NONE);
    463   }
    464 }
    465 
    466 
    467 void MacroAssembler::Set(const Operand& dst, int64_t x) {
    468   if (x == 0) {
    469     xor_(kScratchRegister, kScratchRegister);
    470     movq(dst, kScratchRegister);
    471   } else if (is_int32(x)) {
    472     movq(dst, Immediate(static_cast<int32_t>(x)));
    473   } else if (is_uint32(x)) {
    474     movl(dst, Immediate(static_cast<uint32_t>(x)));
    475   } else {
    476     movq(kScratchRegister, x, RelocInfo::NONE);
    477     movq(dst, kScratchRegister);
    478   }
    479 }
    480 
    481 // ----------------------------------------------------------------------------
    482 // Smi tagging, untagging and tag detection.
    483 
    484 static int kSmiShift = kSmiTagSize + kSmiShiftSize;
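// Editor's note (illustrative): with the x64 long-smi encoding used here,
// kSmiShift works out to 32 (kSmiTagSize + kSmiShiftSize), so the 32-bit
// payload lives in the upper half of the word and the low 32 bits (tag and
// padding) are zero. For example, Integer32ToSmi turns 5 into
// 0x0000000500000000, and SmiToInteger32 shifts it back down.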
    485 
    486 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
    487   ASSERT_EQ(0, kSmiTag);
    488   if (!dst.is(src)) {
    489     movl(dst, src);
    490   }
    491   shl(dst, Immediate(kSmiShift));
    492 }
    493 
    494 
    495 void MacroAssembler::Integer32ToSmi(Register dst,
    496                                     Register src,
    497                                     Label* on_overflow) {
    498   ASSERT_EQ(0, kSmiTag);
    499   // 32-bit integer always fits in a long smi.
    500   if (!dst.is(src)) {
    501     movl(dst, src);
    502   }
    503   shl(dst, Immediate(kSmiShift));
    504 }
    505 
    506 
    507 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
    508                                                 Register src,
    509                                                 int constant) {
    510   if (dst.is(src)) {
    511     addq(dst, Immediate(constant));
    512   } else {
    513     lea(dst, Operand(src, constant));
    514   }
    515   shl(dst, Immediate(kSmiShift));
    516 }
    517 
    518 
    519 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
    520   ASSERT_EQ(0, kSmiTag);
    521   if (!dst.is(src)) {
    522     movq(dst, src);
    523   }
    524   shr(dst, Immediate(kSmiShift));
    525 }
    526 
    527 
    528 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
    529   ASSERT_EQ(0, kSmiTag);
    530   if (!dst.is(src)) {
    531     movq(dst, src);
    532   }
    533   sar(dst, Immediate(kSmiShift));
    534 }
    535 
    536 
    537 void MacroAssembler::SmiTest(Register src) {
    538   testq(src, src);
    539 }
    540 
    541 
    542 void MacroAssembler::SmiCompare(Register dst, Register src) {
    543   cmpq(dst, src);
    544 }
    545 
    546 
    547 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
    548   ASSERT(!dst.is(kScratchRegister));
    549   if (src->value() == 0) {
    550     testq(dst, dst);
    551   } else {
    552     Move(kScratchRegister, src);
    553     cmpq(dst, kScratchRegister);
    554   }
    555 }
    556 
    557 
    558 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
    559   cmpq(dst, src);
    560 }
    561 
    562 
    563 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
    564   if (src->value() == 0) {
     565     // A zero smi is the only tagged long smi with a 32-bit representation.
    566     cmpq(dst, Immediate(0));
    567   } else {
    568     Move(kScratchRegister, src);
    569     cmpq(dst, kScratchRegister);
    570   }
    571 }
    572 
    573 
    574 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
    575                                                            Register src,
    576                                                            int power) {
    577   ASSERT(power >= 0);
    578   ASSERT(power < 64);
    579   if (power == 0) {
    580     SmiToInteger64(dst, src);
    581     return;
    582   }
    583   if (!dst.is(src)) {
    584     movq(dst, src);
    585   }
    586   if (power < kSmiShift) {
    587     sar(dst, Immediate(kSmiShift - power));
    588   } else if (power > kSmiShift) {
    589     shl(dst, Immediate(power - kSmiShift));
    590   }
    591 }
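// Illustration (added): for a positive smi holding 5 (tagged as 5 << kSmiShift)
// and power == 3, the arithmetic shift right by kSmiShift - 3 leaves
// 5 * 8 == 40 in dst.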
    592 
    593 
    594 Condition MacroAssembler::CheckSmi(Register src) {
    595   ASSERT_EQ(0, kSmiTag);
    596   testb(src, Immediate(kSmiTagMask));
    597   return zero;
    598 }
    599 
    600 
    601 Condition MacroAssembler::CheckPositiveSmi(Register src) {
    602   ASSERT_EQ(0, kSmiTag);
    603   movq(kScratchRegister, src);
    604   rol(kScratchRegister, Immediate(1));
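  // Note (added): after the rotate, bit 0 holds the original sign bit and
  // bit 1 holds the smi tag bit, so the test below is zero only for a
  // non-negative smi.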
    605   testl(kScratchRegister, Immediate(0x03));
    606   return zero;
    607 }
    608 
    609 
    610 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
    611   if (first.is(second)) {
    612     return CheckSmi(first);
    613   }
    614   movl(kScratchRegister, first);
    615   orl(kScratchRegister, second);
    616   testb(kScratchRegister, Immediate(kSmiTagMask));
    617   return zero;
    618 }
    619 
    620 
    621 Condition MacroAssembler::CheckBothPositiveSmi(Register first,
    622                                                Register second) {
    623   if (first.is(second)) {
    624     return CheckPositiveSmi(first);
    625   }
    626   movl(kScratchRegister, first);
    627   orl(kScratchRegister, second);
    628   rol(kScratchRegister, Immediate(1));
    629   testl(kScratchRegister, Immediate(0x03));
    630   return zero;
    631 }
    632 
    633 
    634 
    635 Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
    636   if (first.is(second)) {
    637     return CheckSmi(first);
    638   }
    639   movl(kScratchRegister, first);
    640   andl(kScratchRegister, second);
    641   testb(kScratchRegister, Immediate(kSmiTagMask));
    642   return zero;
    643 }
    644 
    645 
    646 Condition MacroAssembler::CheckIsMinSmi(Register src) {
    647   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
    648   movq(kScratchRegister, src);
    649   rol(kScratchRegister, Immediate(1));
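  // Note (added): the smallest smi has only its top bit set, so the rotate
  // above turns it into exactly 1, which the compare below checks for.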
    650   cmpq(kScratchRegister, Immediate(1));
    651   return equal;
    652 }
    653 
    654 
    655 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
    656   // A 32-bit integer value can always be converted to a smi.
    657   return always;
    658 }
    659 
    660 
    661 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
    662   // An unsigned 32-bit integer value is valid as long as the high bit
    663   // is not set.
    664   testq(src, Immediate(0x80000000));
    665   return zero;
    666 }
    667 
    668 
    669 void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
    670   if (dst.is(src)) {
    671     ASSERT(!dst.is(kScratchRegister));
    672     movq(kScratchRegister, src);
    673     neg(dst);  // Low 32 bits are retained as zero by negation.
    674     // Test if result is zero or Smi::kMinValue.
    675     cmpq(dst, kScratchRegister);
    676     j(not_equal, on_smi_result);
    677     movq(src, kScratchRegister);
    678   } else {
    679     movq(dst, src);
    680     neg(dst);
    681     cmpq(dst, src);
    682     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    683     j(not_equal, on_smi_result);
    684   }
    685 }
    686 
    687 
    688 void MacroAssembler::SmiAdd(Register dst,
    689                             Register src1,
    690                             Register src2,
    691                             Label* on_not_smi_result) {
    692   ASSERT(!dst.is(src2));
    693   if (dst.is(src1)) {
    694     addq(dst, src2);
    695     Label smi_result;
    696     j(no_overflow, &smi_result);
    697     // Restore src1.
    698     subq(src1, src2);
    699     jmp(on_not_smi_result);
    700     bind(&smi_result);
    701   } else {
    702     movq(dst, src1);
    703     addq(dst, src2);
    704     j(overflow, on_not_smi_result);
    705   }
    706 }
    707 
    708 
    709 void MacroAssembler::SmiSub(Register dst,
    710                             Register src1,
    711                             Register src2,
    712                             Label* on_not_smi_result) {
    713   ASSERT(!dst.is(src2));
    714   if (on_not_smi_result == NULL) {
    715     // No overflow checking. Use only when it's known that
    716     // overflowing is impossible (e.g., subtracting two positive smis).
    717     if (dst.is(src1)) {
    718       subq(dst, src2);
    719     } else {
    720       movq(dst, src1);
    721       subq(dst, src2);
    722     }
     723     Assert(no_overflow, "Smi subtraction overflow");
    724   } else if (dst.is(src1)) {
    725     subq(dst, src2);
    726     Label smi_result;
    727     j(no_overflow, &smi_result);
    728     // Restore src1.
    729     addq(src1, src2);
    730     jmp(on_not_smi_result);
    731     bind(&smi_result);
    732   } else {
    733     movq(dst, src1);
    734     subq(dst, src2);
    735     j(overflow, on_not_smi_result);
    736   }
    737 }
    738 
    739 
    740 void MacroAssembler::SmiMul(Register dst,
    741                             Register src1,
    742                             Register src2,
    743                             Label* on_not_smi_result) {
    744   ASSERT(!dst.is(src2));
    745   ASSERT(!dst.is(kScratchRegister));
    746   ASSERT(!src1.is(kScratchRegister));
    747   ASSERT(!src2.is(kScratchRegister));
    748 
    749   if (dst.is(src1)) {
    750     Label failure, zero_correct_result;
    751     movq(kScratchRegister, src1);  // Create backup for later testing.
    752     SmiToInteger64(dst, src1);
    753     imul(dst, src2);
    754     j(overflow, &failure);
    755 
    756     // Check for negative zero result.  If product is zero, and one
    757     // argument is negative, go to slow case.
    758     Label correct_result;
    759     testq(dst, dst);
    760     j(not_zero, &correct_result);
    761 
    762     movq(dst, kScratchRegister);
    763     xor_(dst, src2);
    764     j(positive, &zero_correct_result);  // Result was positive zero.
    765 
    766     bind(&failure);  // Reused failure exit, restores src1.
    767     movq(src1, kScratchRegister);
    768     jmp(on_not_smi_result);
    769 
    770     bind(&zero_correct_result);
    771     xor_(dst, dst);
    772 
    773     bind(&correct_result);
    774   } else {
    775     SmiToInteger64(dst, src1);
    776     imul(dst, src2);
    777     j(overflow, on_not_smi_result);
    778     // Check for negative zero result.  If product is zero, and one
    779     // argument is negative, go to slow case.
    780     Label correct_result;
    781     testq(dst, dst);
    782     j(not_zero, &correct_result);
     783     // One of src1 and src2 is zero, so check whether the other is
     784     // negative.
    785     movq(kScratchRegister, src1);
    786     xor_(kScratchRegister, src2);
    787     j(negative, on_not_smi_result);
    788     bind(&correct_result);
    789   }
    790 }
    791 
    792 
    793 void MacroAssembler::SmiTryAddConstant(Register dst,
    794                                        Register src,
    795                                        Smi* constant,
    796                                        Label* on_not_smi_result) {
    797   // Does not assume that src is a smi.
    798   ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
    799   ASSERT_EQ(0, kSmiTag);
    800   ASSERT(!dst.is(kScratchRegister));
    801   ASSERT(!src.is(kScratchRegister));
    802 
    803   JumpIfNotSmi(src, on_not_smi_result);
    804   Register tmp = (dst.is(src) ? kScratchRegister : dst);
    805   Move(tmp, constant);
    806   addq(tmp, src);
    807   j(overflow, on_not_smi_result);
    808   if (dst.is(src)) {
    809     movq(dst, tmp);
    810   }
    811 }
    812 
    813 
    814 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
    815   if (constant->value() == 0) {
    816     if (!dst.is(src)) {
    817       movq(dst, src);
    818     }
    819   } else if (dst.is(src)) {
    820     ASSERT(!dst.is(kScratchRegister));
    821 
    822     Move(kScratchRegister, constant);
    823     addq(dst, kScratchRegister);
    824   } else {
    825     Move(dst, constant);
    826     addq(dst, src);
    827   }
    828 }
    829 
    830 
    831 void MacroAssembler::SmiAddConstant(Register dst,
    832                                     Register src,
    833                                     Smi* constant,
    834                                     Label* on_not_smi_result) {
    835   if (constant->value() == 0) {
    836     if (!dst.is(src)) {
    837       movq(dst, src);
    838     }
    839   } else if (dst.is(src)) {
    840     ASSERT(!dst.is(kScratchRegister));
    841 
    842     Move(kScratchRegister, constant);
    843     addq(dst, kScratchRegister);
    844     Label result_ok;
    845     j(no_overflow, &result_ok);
    846     subq(dst, kScratchRegister);
    847     jmp(on_not_smi_result);
    848     bind(&result_ok);
    849   } else {
    850     Move(dst, constant);
    851     addq(dst, src);
    852     j(overflow, on_not_smi_result);
    853   }
    854 }
    855 
    856 
    857 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
    858   if (constant->value() == 0) {
    859     if (!dst.is(src)) {
    860       movq(dst, src);
    861     }
    862   } else if (dst.is(src)) {
    863     ASSERT(!dst.is(kScratchRegister));
    864 
    865     Move(kScratchRegister, constant);
    866     subq(dst, kScratchRegister);
    867   } else {
     868     // Subtract by adding the negation, so it takes only two operations.
    869     if (constant->value() == Smi::kMinValue) {
    870       Move(kScratchRegister, constant);
    871       movq(dst, src);
    872       subq(dst, kScratchRegister);
    873     } else {
    874       Move(dst, Smi::FromInt(-constant->value()));
    875       addq(dst, src);
    876     }
    877   }
    878 }
    879 
    880 
    881 void MacroAssembler::SmiSubConstant(Register dst,
    882                                     Register src,
    883                                     Smi* constant,
    884                                     Label* on_not_smi_result) {
    885   if (constant->value() == 0) {
    886     if (!dst.is(src)) {
    887       movq(dst, src);
    888     }
    889   } else if (dst.is(src)) {
    890     ASSERT(!dst.is(kScratchRegister));
    891 
    892     Move(kScratchRegister, constant);
    893     subq(dst, kScratchRegister);
    894     Label sub_success;
    895     j(no_overflow, &sub_success);
    896     addq(src, kScratchRegister);
    897     jmp(on_not_smi_result);
    898     bind(&sub_success);
    899   } else {
    900     if (constant->value() == Smi::kMinValue) {
    901       Move(kScratchRegister, constant);
    902       movq(dst, src);
    903       subq(dst, kScratchRegister);
    904       j(overflow, on_not_smi_result);
    905     } else {
    906       Move(dst, Smi::FromInt(-(constant->value())));
    907       addq(dst, src);
    908       j(overflow, on_not_smi_result);
    909     }
    910   }
    911 }
    912 
    913 
    914 void MacroAssembler::SmiDiv(Register dst,
    915                             Register src1,
    916                             Register src2,
    917                             Label* on_not_smi_result) {
    918   ASSERT(!src1.is(kScratchRegister));
    919   ASSERT(!src2.is(kScratchRegister));
    920   ASSERT(!dst.is(kScratchRegister));
    921   ASSERT(!src2.is(rax));
    922   ASSERT(!src2.is(rdx));
    923   ASSERT(!src1.is(rdx));
    924 
    925   // Check for 0 divisor (result is +/-Infinity).
    926   Label positive_divisor;
    927   testq(src2, src2);
    928   j(zero, on_not_smi_result);
    929 
    930   if (src1.is(rax)) {
    931     movq(kScratchRegister, src1);
    932   }
    933   SmiToInteger32(rax, src1);
    934   // We need to rule out dividing Smi::kMinValue by -1, since that would
    935   // overflow in idiv and raise an exception.
    936   // We combine this with negative zero test (negative zero only happens
    937   // when dividing zero by a negative number).
    938 
    939   // We overshoot a little and go to slow case if we divide min-value
    940   // by any negative value, not just -1.
    941   Label safe_div;
    942   testl(rax, Immediate(0x7fffffff));
    943   j(not_zero, &safe_div);
    944   testq(src2, src2);
    945   if (src1.is(rax)) {
    946     j(positive, &safe_div);
    947     movq(src1, kScratchRegister);
    948     jmp(on_not_smi_result);
    949   } else {
    950     j(negative, on_not_smi_result);
    951   }
    952   bind(&safe_div);
    953 
    954   SmiToInteger32(src2, src2);
     955   // Sign extend eax (the value of src1) into edx:eax.
    956   cdq();
    957   idivl(src2);
    958   Integer32ToSmi(src2, src2);
    959   // Check that the remainder is zero.
    960   testl(rdx, rdx);
    961   if (src1.is(rax)) {
    962     Label smi_result;
    963     j(zero, &smi_result);
    964     movq(src1, kScratchRegister);
    965     jmp(on_not_smi_result);
    966     bind(&smi_result);
    967   } else {
    968     j(not_zero, on_not_smi_result);
    969   }
    970   if (!dst.is(src1) && src1.is(rax)) {
    971     movq(src1, kScratchRegister);
    972   }
    973   Integer32ToSmi(dst, rax);
    974 }
    975 
    976 
    977 void MacroAssembler::SmiMod(Register dst,
    978                             Register src1,
    979                             Register src2,
    980                             Label* on_not_smi_result) {
    981   ASSERT(!dst.is(kScratchRegister));
    982   ASSERT(!src1.is(kScratchRegister));
    983   ASSERT(!src2.is(kScratchRegister));
    984   ASSERT(!src2.is(rax));
    985   ASSERT(!src2.is(rdx));
    986   ASSERT(!src1.is(rdx));
    987   ASSERT(!src1.is(src2));
    988 
    989   testq(src2, src2);
    990   j(zero, on_not_smi_result);
    991 
    992   if (src1.is(rax)) {
    993     movq(kScratchRegister, src1);
    994   }
    995   SmiToInteger32(rax, src1);
    996   SmiToInteger32(src2, src2);
    997 
    998   // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
    999   Label safe_div;
   1000   cmpl(rax, Immediate(Smi::kMinValue));
   1001   j(not_equal, &safe_div);
   1002   cmpl(src2, Immediate(-1));
   1003   j(not_equal, &safe_div);
   1004   // Retag inputs and go slow case.
   1005   Integer32ToSmi(src2, src2);
   1006   if (src1.is(rax)) {
   1007     movq(src1, kScratchRegister);
   1008   }
   1009   jmp(on_not_smi_result);
   1010   bind(&safe_div);
   1011 
   1012   // Sign extend eax into edx:eax.
   1013   cdq();
   1014   idivl(src2);
   1015   // Restore smi tags on inputs.
   1016   Integer32ToSmi(src2, src2);
   1017   if (src1.is(rax)) {
   1018     movq(src1, kScratchRegister);
   1019   }
   1020   // Check for a negative zero result.  If the result is zero, and the
   1021   // dividend is negative, go slow to return a floating point negative zero.
   1022   Label smi_result;
   1023   testl(rdx, rdx);
   1024   j(not_zero, &smi_result);
   1025   testq(src1, src1);
   1026   j(negative, on_not_smi_result);
   1027   bind(&smi_result);
   1028   Integer32ToSmi(dst, rdx);
   1029 }
   1030 
   1031 
   1032 void MacroAssembler::SmiNot(Register dst, Register src) {
   1033   ASSERT(!dst.is(kScratchRegister));
   1034   ASSERT(!src.is(kScratchRegister));
   1035   // Set tag and padding bits before negating, so that they are zero afterwards.
   1036   movl(kScratchRegister, Immediate(~0));
   1037   if (dst.is(src)) {
   1038     xor_(dst, kScratchRegister);
   1039   } else {
   1040     lea(dst, Operand(src, kScratchRegister, times_1, 0));
   1041   }
   1042   not_(dst);
   1043 }
   1044 
   1045 
   1046 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
   1047   ASSERT(!dst.is(src2));
   1048   if (!dst.is(src1)) {
   1049     movq(dst, src1);
   1050   }
   1051   and_(dst, src2);
   1052 }
   1053 
   1054 
   1055 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   1056   if (constant->value() == 0) {
   1057     xor_(dst, dst);
   1058   } else if (dst.is(src)) {
   1059     ASSERT(!dst.is(kScratchRegister));
   1060     Move(kScratchRegister, constant);
   1061     and_(dst, kScratchRegister);
   1062   } else {
   1063     Move(dst, constant);
   1064     and_(dst, src);
   1065   }
   1066 }
   1067 
   1068 
   1069 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   1070   if (!dst.is(src1)) {
   1071     movq(dst, src1);
   1072   }
   1073   or_(dst, src2);
   1074 }
   1075 
   1076 
   1077 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   1078   if (dst.is(src)) {
   1079     ASSERT(!dst.is(kScratchRegister));
   1080     Move(kScratchRegister, constant);
   1081     or_(dst, kScratchRegister);
   1082   } else {
   1083     Move(dst, constant);
   1084     or_(dst, src);
   1085   }
   1086 }
   1087 
   1088 
   1089 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   1090   if (!dst.is(src1)) {
   1091     movq(dst, src1);
   1092   }
   1093   xor_(dst, src2);
   1094 }
   1095 
   1096 
   1097 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   1098   if (dst.is(src)) {
   1099     ASSERT(!dst.is(kScratchRegister));
   1100     Move(kScratchRegister, constant);
   1101     xor_(dst, kScratchRegister);
   1102   } else {
   1103     Move(dst, constant);
   1104     xor_(dst, src);
   1105   }
   1106 }
   1107 
   1108 
   1109 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
   1110                                                      Register src,
   1111                                                      int shift_value) {
   1112   ASSERT(is_uint5(shift_value));
   1113   if (shift_value > 0) {
   1114     if (dst.is(src)) {
   1115       sar(dst, Immediate(shift_value + kSmiShift));
   1116       shl(dst, Immediate(kSmiShift));
   1117     } else {
   1118       UNIMPLEMENTED();  // Not used.
   1119     }
   1120   }
   1121 }
   1122 
   1123 
   1124 void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
   1125                                                   Register src,
   1126                                                   int shift_value,
   1127                                                   Label* on_not_smi_result) {
    1128   // Logical right shift interprets its result as an *unsigned* number.
   1129   if (dst.is(src)) {
   1130     UNIMPLEMENTED();  // Not used.
   1131   } else {
   1132     movq(dst, src);
   1133     if (shift_value == 0) {
   1134       testq(dst, dst);
   1135       j(negative, on_not_smi_result);
   1136     }
   1137     shr(dst, Immediate(shift_value + kSmiShift));
   1138     shl(dst, Immediate(kSmiShift));
   1139   }
   1140 }
   1141 
   1142 
   1143 void MacroAssembler::SmiShiftLeftConstant(Register dst,
   1144                                           Register src,
   1145                                           int shift_value,
   1146                                           Label* on_not_smi_result) {
   1147   if (!dst.is(src)) {
   1148     movq(dst, src);
   1149   }
   1150   if (shift_value > 0) {
   1151     shl(dst, Immediate(shift_value));
   1152   }
   1153 }
   1154 
   1155 
   1156 void MacroAssembler::SmiShiftLeft(Register dst,
   1157                                   Register src1,
   1158                                   Register src2,
   1159                                   Label* on_not_smi_result) {
   1160   ASSERT(!dst.is(rcx));
   1161   Label result_ok;
   1162   // Untag shift amount.
   1163   if (!dst.is(src1)) {
   1164     movq(dst, src1);
   1165   }
   1166   SmiToInteger32(rcx, src2);
    1167   // The shift amount is taken from the lower 5 bits only, not the six bits the 64-bit shl opcode would use.
   1168   and_(rcx, Immediate(0x1f));
   1169   shl_cl(dst);
   1170 }
   1171 
   1172 
   1173 void MacroAssembler::SmiShiftLogicalRight(Register dst,
   1174                                           Register src1,
   1175                                           Register src2,
   1176                                           Label* on_not_smi_result) {
   1177   ASSERT(!dst.is(kScratchRegister));
   1178   ASSERT(!src1.is(kScratchRegister));
   1179   ASSERT(!src2.is(kScratchRegister));
   1180   ASSERT(!dst.is(rcx));
   1181   Label result_ok;
   1182   if (src1.is(rcx) || src2.is(rcx)) {
   1183     movq(kScratchRegister, rcx);
   1184   }
   1185   if (!dst.is(src1)) {
   1186     movq(dst, src1);
   1187   }
   1188   SmiToInteger32(rcx, src2);
   1189   orl(rcx, Immediate(kSmiShift));
    1190   shr_cl(dst);  // Shift is (rcx & 0x1f) + 32.
   1191   shl(dst, Immediate(kSmiShift));
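  // Net effect (note added): the combined shr by (32 + shift amount) and shl
  // by 32 logically shifts the untagged 32-bit payload and re-tags the result.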
   1192   testq(dst, dst);
   1193   if (src1.is(rcx) || src2.is(rcx)) {
   1194     Label positive_result;
   1195     j(positive, &positive_result);
   1196     if (src1.is(rcx)) {
   1197       movq(src1, kScratchRegister);
   1198     } else {
   1199       movq(src2, kScratchRegister);
   1200     }
   1201     jmp(on_not_smi_result);
   1202     bind(&positive_result);
   1203   } else {
   1204     j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
   1205   }
   1206 }
   1207 
   1208 
   1209 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
   1210                                              Register src1,
   1211                                              Register src2) {
   1212   ASSERT(!dst.is(kScratchRegister));
   1213   ASSERT(!src1.is(kScratchRegister));
   1214   ASSERT(!src2.is(kScratchRegister));
   1215   ASSERT(!dst.is(rcx));
   1216   if (src1.is(rcx)) {
   1217     movq(kScratchRegister, src1);
   1218   } else if (src2.is(rcx)) {
   1219     movq(kScratchRegister, src2);
   1220   }
   1221   if (!dst.is(src1)) {
   1222     movq(dst, src1);
   1223   }
   1224   SmiToInteger32(rcx, src2);
   1225   orl(rcx, Immediate(kSmiShift));
    1226   sar_cl(dst);  // Shift by (original rcx & 0x1f) + 32.
   1227   shl(dst, Immediate(kSmiShift));
   1228   if (src1.is(rcx)) {
   1229     movq(src1, kScratchRegister);
   1230   } else if (src2.is(rcx)) {
   1231     movq(src2, kScratchRegister);
   1232   }
   1233 }
   1234 
   1235 
   1236 void MacroAssembler::SelectNonSmi(Register dst,
   1237                                   Register src1,
   1238                                   Register src2,
   1239                                   Label* on_not_smis) {
   1240   ASSERT(!dst.is(kScratchRegister));
   1241   ASSERT(!src1.is(kScratchRegister));
   1242   ASSERT(!src2.is(kScratchRegister));
   1243   ASSERT(!dst.is(src1));
   1244   ASSERT(!dst.is(src2));
    1245   // The operands must not both be smis.
   1246 #ifdef DEBUG
   1247   if (allow_stub_calls()) {  // Check contains a stub call.
   1248     Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
   1249     Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
   1250   }
   1251 #endif
   1252   ASSERT_EQ(0, kSmiTag);
   1253   ASSERT_EQ(0, Smi::FromInt(0));
   1254   movl(kScratchRegister, Immediate(kSmiTagMask));
   1255   and_(kScratchRegister, src1);
   1256   testl(kScratchRegister, src2);
    1257   // If non-zero then neither operand is a smi.
   1258   j(not_zero, on_not_smis);
   1259 
   1260   // Exactly one operand is a smi.
   1261   ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
    1262   // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
    1263   subq(kScratchRegister, Immediate(1));
    1264   // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
   1265   movq(dst, src1);
   1266   xor_(dst, src2);
   1267   and_(dst, kScratchRegister);
   1268   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   1269   xor_(dst, src1);
   1270   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
   1271 }
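// Worked example (added for clarity): if src1 is a smi (tag bit 0) and src2 a
// heap object (tag bit 1), the scratch register becomes 0 - 1 == all 1s, so
// dst == ((src1 ^ src2) & ~0) ^ src1 == src2, the non-smi operand.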
   1272 
   1273 SmiIndex MacroAssembler::SmiToIndex(Register dst,
   1274                                     Register src,
   1275                                     int shift) {
   1276   ASSERT(is_uint6(shift));
   1277   // There is a possible optimization if shift is in the range 60-63, but that
   1278   // will (and must) never happen.
   1279   if (!dst.is(src)) {
   1280     movq(dst, src);
   1281   }
   1282   if (shift < kSmiShift) {
   1283     sar(dst, Immediate(kSmiShift - shift));
   1284   } else {
   1285     shl(dst, Immediate(shift - kSmiShift));
   1286   }
   1287   return SmiIndex(dst, times_1);
   1288 }
   1289 
   1290 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   1291                                             Register src,
   1292                                             int shift) {
   1293   // Register src holds a positive smi.
   1294   ASSERT(is_uint6(shift));
   1295   if (!dst.is(src)) {
   1296     movq(dst, src);
   1297   }
   1298   neg(dst);
   1299   if (shift < kSmiShift) {
   1300     sar(dst, Immediate(kSmiShift - shift));
   1301   } else {
   1302     shl(dst, Immediate(shift - kSmiShift));
   1303   }
   1304   return SmiIndex(dst, times_1);
   1305 }
   1306 
   1307 
   1308 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
   1309   ASSERT_EQ(0, kSmiTag);
   1310   Condition smi = CheckSmi(src);
   1311   j(smi, on_smi);
   1312 }
   1313 
   1314 
   1315 void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
   1316   Condition smi = CheckSmi(src);
   1317   j(NegateCondition(smi), on_not_smi);
   1318 }
   1319 
   1320 
   1321 void MacroAssembler::JumpIfNotPositiveSmi(Register src,
   1322                                           Label* on_not_positive_smi) {
   1323   Condition positive_smi = CheckPositiveSmi(src);
   1324   j(NegateCondition(positive_smi), on_not_positive_smi);
   1325 }
   1326 
   1327 
   1328 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
   1329                                              Smi* constant,
   1330                                              Label* on_equals) {
   1331   SmiCompare(src, constant);
   1332   j(equal, on_equals);
   1333 }
   1334 
   1335 
   1336 void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
   1337   Condition is_valid = CheckInteger32ValidSmiValue(src);
   1338   j(NegateCondition(is_valid), on_invalid);
   1339 }
   1340 
   1341 
   1342 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
   1343                                                 Label* on_invalid) {
   1344   Condition is_valid = CheckUInteger32ValidSmiValue(src);
   1345   j(NegateCondition(is_valid), on_invalid);
   1346 }
   1347 
   1348 
   1349 void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
   1350                                       Label* on_not_both_smi) {
   1351   Condition both_smi = CheckBothSmi(src1, src2);
   1352   j(NegateCondition(both_smi), on_not_both_smi);
   1353 }
   1354 
   1355 
   1356 void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
   1357                                               Label* on_not_both_smi) {
   1358   Condition both_smi = CheckBothPositiveSmi(src1, src2);
   1359   j(NegateCondition(both_smi), on_not_both_smi);
   1360 }
   1361 
   1362 
   1363 
   1364 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
   1365                                                          Register second_object,
   1366                                                          Register scratch1,
   1367                                                          Register scratch2,
   1368                                                          Label* on_fail) {
   1369   // Check that both objects are not smis.
   1370   Condition either_smi = CheckEitherSmi(first_object, second_object);
   1371   j(either_smi, on_fail);
   1372 
   1373   // Load instance type for both strings.
   1374   movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
   1375   movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
   1376   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   1377   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
   1378 
   1379   // Check that both are flat ascii strings.
   1380   ASSERT(kNotStringTag != 0);
   1381   const int kFlatAsciiStringMask =
   1382       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   1383   const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
   1384 
   1385   andl(scratch1, Immediate(kFlatAsciiStringMask));
   1386   andl(scratch2, Immediate(kFlatAsciiStringMask));
   1387   // Interleave the bits to check both scratch1 and scratch2 in one test.
   1388   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
   1389   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
   1390   cmpl(scratch1,
   1391        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
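  // Note (added): the lea above computes scratch1 + (scratch2 << 3); the ASSERT
  // guarantees the masked bits and their 3-bit-shifted copy do not overlap, so
  // the sum has no carries and a single compare checks both strings at once.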
   1392   j(not_equal, on_fail);
   1393 }
   1394 
   1395 
   1396 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   1397   ASSERT(!source->IsFailure());
   1398   if (source->IsSmi()) {
   1399     Move(dst, Smi::cast(*source));
   1400   } else {
   1401     movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
   1402   }
   1403 }
   1404 
   1405 
   1406 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
   1407   ASSERT(!source->IsFailure());
   1408   if (source->IsSmi()) {
   1409     Move(dst, Smi::cast(*source));
   1410   } else {
   1411     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   1412     movq(dst, kScratchRegister);
   1413   }
   1414 }
   1415 
   1416 
   1417 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
   1418   if (source->IsSmi()) {
   1419     SmiCompare(dst, Smi::cast(*source));
   1420   } else {
   1421     Move(kScratchRegister, source);
   1422     cmpq(dst, kScratchRegister);
   1423   }
   1424 }
   1425 
   1426 
   1427 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   1428   if (source->IsSmi()) {
   1429     SmiCompare(dst, Smi::cast(*source));
   1430   } else {
   1431     ASSERT(source->IsHeapObject());
   1432     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   1433     cmpq(dst, kScratchRegister);
   1434   }
   1435 }
   1436 
   1437 
   1438 void MacroAssembler::Push(Handle<Object> source) {
   1439   if (source->IsSmi()) {
   1440     Push(Smi::cast(*source));
   1441   } else {
   1442     ASSERT(source->IsHeapObject());
   1443     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   1444     push(kScratchRegister);
   1445   }
   1446 }
   1447 
   1448 
   1449 void MacroAssembler::Push(Smi* source) {
   1450   intptr_t smi = reinterpret_cast<intptr_t>(source);
   1451   if (is_int32(smi)) {
   1452     push(Immediate(static_cast<int32_t>(smi)));
   1453   } else {
   1454     Set(kScratchRegister, smi);
   1455     push(kScratchRegister);
   1456   }
   1457 }
   1458 
   1459 
   1460 void MacroAssembler::Drop(int stack_elements) {
   1461   if (stack_elements > 0) {
   1462     addq(rsp, Immediate(stack_elements * kPointerSize));
   1463   }
   1464 }
   1465 
   1466 
   1467 void MacroAssembler::Test(const Operand& src, Smi* source) {
   1468   intptr_t smi = reinterpret_cast<intptr_t>(source);
   1469   if (is_int32(smi)) {
   1470     testl(src, Immediate(static_cast<int32_t>(smi)));
   1471   } else {
   1472     Move(kScratchRegister, source);
   1473     testq(src, kScratchRegister);
   1474   }
   1475 }
   1476 
   1477 
   1478 void MacroAssembler::Jump(ExternalReference ext) {
   1479   movq(kScratchRegister, ext);
   1480   jmp(kScratchRegister);
   1481 }
   1482 
   1483 
   1484 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
   1485   movq(kScratchRegister, destination, rmode);
   1486   jmp(kScratchRegister);
   1487 }
   1488 
   1489 
   1490 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   1491   // TODO(X64): Inline this
   1492   jmp(code_object, rmode);
   1493 }
   1494 
   1495 
   1496 void MacroAssembler::Call(ExternalReference ext) {
   1497   movq(kScratchRegister, ext);
   1498   call(kScratchRegister);
   1499 }
   1500 
   1501 
   1502 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
   1503   movq(kScratchRegister, destination, rmode);
   1504   call(kScratchRegister);
   1505 }
   1506 
   1507 
   1508 void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   1509   ASSERT(RelocInfo::IsCodeTarget(rmode));
   1510   WriteRecordedPositions();
   1511   call(code_object, rmode);
   1512 }
   1513 
   1514 
   1515 void MacroAssembler::PushTryHandler(CodeLocation try_location,
   1516                                     HandlerType type) {
   1517   // Adjust this code if not the case.
   1518   ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
   1519 
   1520   // The pc (return address) is already on TOS.  This code pushes state,
   1521   // frame pointer and current handler.  Check that they are expected
   1522   // next on the stack, in that order.
   1523   ASSERT_EQ(StackHandlerConstants::kStateOffset,
   1524             StackHandlerConstants::kPCOffset - kPointerSize);
   1525   ASSERT_EQ(StackHandlerConstants::kFPOffset,
   1526             StackHandlerConstants::kStateOffset - kPointerSize);
   1527   ASSERT_EQ(StackHandlerConstants::kNextOffset,
   1528             StackHandlerConstants::kFPOffset - kPointerSize);
   1529 
   1530   if (try_location == IN_JAVASCRIPT) {
   1531     if (type == TRY_CATCH_HANDLER) {
   1532       push(Immediate(StackHandler::TRY_CATCH));
   1533     } else {
   1534       push(Immediate(StackHandler::TRY_FINALLY));
   1535     }
   1536     push(rbp);
   1537   } else {
   1538     ASSERT(try_location == IN_JS_ENTRY);
   1539     // The frame pointer does not point to a JS frame so we save NULL
   1540     // for rbp. We expect the code throwing an exception to check rbp
   1541     // before dereferencing it to restore the context.
   1542     push(Immediate(StackHandler::ENTRY));
   1543     push(Immediate(0));  // NULL frame pointer.
   1544   }
   1545   // Save the current handler.
   1546   movq(kScratchRegister, ExternalReference(Top::k_handler_address));
   1547   push(Operand(kScratchRegister, 0));
   1548   // Link this handler.
   1549   movq(Operand(kScratchRegister, 0), rsp);
   1550 }
   1551 
   1552 
   1553 void MacroAssembler::PopTryHandler() {
   1554   ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
   1555   // Unlink this handler.
   1556   movq(kScratchRegister, ExternalReference(Top::k_handler_address));
   1557   pop(Operand(kScratchRegister, 0));
   1558   // Remove the remaining fields.
   1559   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
   1560 }
   1561 
   1562 
   1563 void MacroAssembler::Ret() {
   1564   ret(0);
   1565 }
   1566 
   1567 
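        // Explanatory note: FCmp compares st(0) with st(1). fucomip sets EFLAGS
        // from the comparison and pops st(0); the ffree/fincstp pair then discards
        // the remaining operand, leaving the x87 register stack empty.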
   1568 void MacroAssembler::FCmp() {
   1569   fucomip();
   1570   ffree(0);
   1571   fincstp();
   1572 }
   1573 
   1574 
   1575 void MacroAssembler::CmpObjectType(Register heap_object,
   1576                                    InstanceType type,
   1577                                    Register map) {
   1578   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   1579   CmpInstanceType(map, type);
   1580 }
   1581 
   1582 
   1583 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
   1584   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
   1585        Immediate(static_cast<int8_t>(type)));
   1586 }
   1587 
   1588 
   1589 void MacroAssembler::CheckMap(Register obj,
   1590                               Handle<Map> map,
   1591                               Label* fail,
   1592                               bool is_heap_object) {
   1593   if (!is_heap_object) {
   1594     JumpIfSmi(obj, fail);
   1595   }
   1596   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   1597   j(not_equal, fail);
   1598 }
   1599 
   1600 
   1601 void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
   1602   Label ok;
   1603   Condition is_smi = CheckSmi(object);
   1604   j(is_smi, &ok);
   1605   Cmp(FieldOperand(object, HeapObject::kMapOffset),
   1606       Factory::heap_number_map());
   1607   Assert(equal, msg);
   1608   bind(&ok);
   1609 }
   1610 
   1611 
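        // Explanatory note: the condition returned below (zero) holds exactly when
        // heap_object is a string, so a caller might write, e.g.:
        //   j(IsObjectStringType(obj, map, type), &is_string);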
   1612 Condition MacroAssembler::IsObjectStringType(Register heap_object,
   1613                                              Register map,
   1614                                              Register instance_type) {
   1615   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   1616   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
   1617   ASSERT(kNotStringTag != 0);
   1618   testb(instance_type, Immediate(kIsNotStringMask));
   1619   return zero;
   1620 }
   1621 
   1622 
   1623 void MacroAssembler::TryGetFunctionPrototype(Register function,
   1624                                              Register result,
   1625                                              Label* miss) {
   1626   // Check that the receiver isn't a smi.
   1627   testl(function, Immediate(kSmiTagMask));
   1628   j(zero, miss);
   1629 
   1630   // Check that the function really is a function.
   1631   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   1632   j(not_equal, miss);
   1633 
   1634   // Make sure that the function has an instance prototype.
   1635   Label non_instance;
   1636   testb(FieldOperand(result, Map::kBitFieldOffset),
   1637         Immediate(1 << Map::kHasNonInstancePrototype));
   1638   j(not_zero, &non_instance);
   1639 
   1640   // Get the prototype or initial map from the function.
   1641   movq(result,
   1642        FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   1643 
   1644   // If the prototype or initial map is the hole, don't return it and
   1645   // simply miss the cache instead. This will allow us to allocate a
   1646   // prototype object on-demand in the runtime system.
   1647   CompareRoot(result, Heap::kTheHoleValueRootIndex);
   1648   j(equal, miss);
   1649 
   1650   // If the function does not have an initial map, we're done.
   1651   Label done;
   1652   CmpObjectType(result, MAP_TYPE, kScratchRegister);
   1653   j(not_equal, &done);
   1654 
   1655   // Get the prototype from the initial map.
   1656   movq(result, FieldOperand(result, Map::kPrototypeOffset));
   1657   jmp(&done);
   1658 
   1659   // Non-instance prototype: Fetch prototype from constructor field
   1660   // in initial map.
   1661   bind(&non_instance);
   1662   movq(result, FieldOperand(result, Map::kConstructorOffset));
   1663 
   1664   // All done.
   1665   bind(&done);
   1666 }
   1667 
   1668 
   1669 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   1670   if (FLAG_native_code_counters && counter->Enabled()) {
   1671     movq(kScratchRegister, ExternalReference(counter));
   1672     movl(Operand(kScratchRegister, 0), Immediate(value));
   1673   }
   1674 }
   1675 
   1676 
   1677 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
   1678   ASSERT(value > 0);
   1679   if (FLAG_native_code_counters && counter->Enabled()) {
   1680     movq(kScratchRegister, ExternalReference(counter));
   1681     Operand operand(kScratchRegister, 0);
   1682     if (value == 1) {
   1683       incl(operand);
   1684     } else {
   1685       addl(operand, Immediate(value));
   1686     }
   1687   }
   1688 }
   1689 
   1690 
   1691 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
   1692   ASSERT(value > 0);
   1693   if (FLAG_native_code_counters && counter->Enabled()) {
   1694     movq(kScratchRegister, ExternalReference(counter));
   1695     Operand operand(kScratchRegister, 0);
   1696     if (value == 1) {
   1697       decl(operand);
   1698     } else {
   1699       subl(operand, Immediate(value));
   1700     }
   1701   }
   1702 }
   1703 
   1704 #ifdef ENABLE_DEBUGGER_SUPPORT
   1705 
   1706 void MacroAssembler::PushRegistersFromMemory(RegList regs) {
   1707   ASSERT((regs & ~kJSCallerSaved) == 0);
   1708   // Push the content of the memory location to the stack.
   1709   for (int i = 0; i < kNumJSCallerSaved; i++) {
   1710     int r = JSCallerSavedCode(i);
   1711     if ((regs & (1 << r)) != 0) {
   1712       ExternalReference reg_addr =
   1713           ExternalReference(Debug_Address::Register(i));
   1714       movq(kScratchRegister, reg_addr);
   1715       push(Operand(kScratchRegister, 0));
   1716     }
   1717   }
   1718 }
   1719 
   1720 
   1721 void MacroAssembler::SaveRegistersToMemory(RegList regs) {
   1722   ASSERT((regs & ~kJSCallerSaved) == 0);
   1723   // Copy the content of registers to memory location.
   1724   for (int i = 0; i < kNumJSCallerSaved; i++) {
   1725     int r = JSCallerSavedCode(i);
   1726     if ((regs & (1 << r)) != 0) {
   1727       Register reg = { r };
   1728       ExternalReference reg_addr =
   1729           ExternalReference(Debug_Address::Register(i));
   1730       movq(kScratchRegister, reg_addr);
   1731       movq(Operand(kScratchRegister, 0), reg);
   1732     }
   1733   }
   1734 }
   1735 
   1736 
   1737 void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
   1738   ASSERT((regs & ~kJSCallerSaved) == 0);
   1739   // Copy the content of memory location to registers.
   1740   for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
   1741     int r = JSCallerSavedCode(i);
   1742     if ((regs & (1 << r)) != 0) {
   1743       Register reg = { r };
   1744       ExternalReference reg_addr =
   1745           ExternalReference(Debug_Address::Register(i));
   1746       movq(kScratchRegister, reg_addr);
   1747       movq(reg, Operand(kScratchRegister, 0));
   1748     }
   1749   }
   1750 }
   1751 
   1752 
   1753 void MacroAssembler::PopRegistersToMemory(RegList regs) {
   1754   ASSERT((regs & ~kJSCallerSaved) == 0);
   1755   // Pop the content from the stack to the memory location.
   1756   for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
   1757     int r = JSCallerSavedCode(i);
   1758     if ((regs & (1 << r)) != 0) {
   1759       ExternalReference reg_addr =
   1760           ExternalReference(Debug_Address::Register(i));
   1761       movq(kScratchRegister, reg_addr);
   1762       pop(Operand(kScratchRegister, 0));
   1763     }
   1764   }
   1765 }
   1766 
   1767 
   1768 void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
   1769                                                     Register scratch,
   1770                                                     RegList regs) {
   1771   ASSERT(!scratch.is(kScratchRegister));
   1772   ASSERT(!base.is(kScratchRegister));
   1773   ASSERT(!base.is(scratch));
   1774   ASSERT((regs & ~kJSCallerSaved) == 0);
   1775   // Copy the content of the stack to the memory location and adjust base.
   1776   for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
   1777     int r = JSCallerSavedCode(i);
   1778     if ((regs & (1 << r)) != 0) {
   1779       movq(scratch, Operand(base, 0));
   1780       ExternalReference reg_addr =
   1781           ExternalReference(Debug_Address::Register(i));
   1782       movq(kScratchRegister, reg_addr);
   1783       movq(Operand(kScratchRegister, 0), scratch);
   1784       lea(base, Operand(base, kPointerSize));
   1785     }
   1786   }
   1787 }
   1788 
   1789 void MacroAssembler::DebugBreak() {
   1790   ASSERT(allow_stub_calls());
   1791   xor_(rax, rax);  // no arguments
   1792   movq(rbx, ExternalReference(Runtime::kDebugBreak));
   1793   CEntryStub ces(1);
   1794   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
   1795 }
   1796 #endif  // ENABLE_DEBUGGER_SUPPORT
   1797 
   1798 
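        // Explanatory note on the invoke helpers below (see the ASSERTs in
        // InvokePrologue): the actual argument count travels in rax and the expected
        // count in rbx. When the counts differ, the code to invoke is placed in rdx
        // and control is routed through the ArgumentsAdaptorTrampoline builtin.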
   1799 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
   1800                                     const ParameterCount& actual,
   1801                                     Handle<Code> code_constant,
   1802                                     Register code_register,
   1803                                     Label* done,
   1804                                     InvokeFlag flag) {
   1805   bool definitely_matches = false;
   1806   Label invoke;
   1807   if (expected.is_immediate()) {
   1808     ASSERT(actual.is_immediate());
   1809     if (expected.immediate() == actual.immediate()) {
   1810       definitely_matches = true;
   1811     } else {
   1812       movq(rax, Immediate(actual.immediate()));
   1813       if (expected.immediate() ==
   1814               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
   1815         // Don't worry about adapting arguments for built-ins that
   1816         // don't want that done. Skip adaptation code by making it look
   1817         // like we have a match between expected and actual number of
   1818         // arguments.
   1819         definitely_matches = true;
   1820       } else {
   1821         movq(rbx, Immediate(expected.immediate()));
   1822       }
   1823     }
   1824   } else {
   1825     if (actual.is_immediate()) {
   1826       // Expected is in register, actual is immediate. This is the
   1827       // case when we invoke function values without going through the
   1828       // IC mechanism.
   1829       cmpq(expected.reg(), Immediate(actual.immediate()));
   1830       j(equal, &invoke);
   1831       ASSERT(expected.reg().is(rbx));
   1832       movq(rax, Immediate(actual.immediate()));
   1833     } else if (!expected.reg().is(actual.reg())) {
   1834       // Both expected and actual are in (different) registers. This
   1835       // is the case when we invoke functions using call and apply.
   1836       cmpq(expected.reg(), actual.reg());
   1837       j(equal, &invoke);
   1838       ASSERT(actual.reg().is(rax));
   1839       ASSERT(expected.reg().is(rbx));
   1840     }
   1841   }
   1842 
   1843   if (!definitely_matches) {
   1844     Handle<Code> adaptor =
   1845         Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
   1846     if (!code_constant.is_null()) {
   1847       movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
   1848       addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   1849     } else if (!code_register.is(rdx)) {
   1850       movq(rdx, code_register);
   1851     }
   1852 
   1853     if (flag == CALL_FUNCTION) {
   1854       Call(adaptor, RelocInfo::CODE_TARGET);
   1855       jmp(done);
   1856     } else {
   1857       Jump(adaptor, RelocInfo::CODE_TARGET);
   1858     }
   1859     bind(&invoke);
   1860   }
   1861 }
   1862 
   1863 
   1864 void MacroAssembler::InvokeCode(Register code,
   1865                                 const ParameterCount& expected,
   1866                                 const ParameterCount& actual,
   1867                                 InvokeFlag flag) {
   1868   Label done;
   1869   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
   1870   if (flag == CALL_FUNCTION) {
   1871     call(code);
   1872   } else {
   1873     ASSERT(flag == JUMP_FUNCTION);
   1874     jmp(code);
   1875   }
   1876   bind(&done);
   1877 }
   1878 
   1879 
   1880 void MacroAssembler::InvokeCode(Handle<Code> code,
   1881                                 const ParameterCount& expected,
   1882                                 const ParameterCount& actual,
   1883                                 RelocInfo::Mode rmode,
   1884                                 InvokeFlag flag) {
   1885   Label done;
   1886   Register dummy = rax;
   1887   InvokePrologue(expected, actual, code, dummy, &done, flag);
   1888   if (flag == CALL_FUNCTION) {
   1889     Call(code, rmode);
   1890   } else {
   1891     ASSERT(flag == JUMP_FUNCTION);
   1892     Jump(code, rmode);
   1893   }
   1894   bind(&done);
   1895 }
   1896 
   1897 
   1898 void MacroAssembler::InvokeFunction(Register function,
   1899                                     const ParameterCount& actual,
   1900                                     InvokeFlag flag) {
   1901   ASSERT(function.is(rdi));
   1902   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   1903   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
   1904   movsxlq(rbx,
   1905           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
   1906   movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
   1907   // Advances rdx to the end of the Code object header, to the start of
   1908   // the executable code.
   1909   lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
   1910 
   1911   ParameterCount expected(rbx);
   1912   InvokeCode(rdx, expected, actual, flag);
   1913 }
   1914 
   1915 
   1916 void MacroAssembler::InvokeFunction(JSFunction* function,
   1917                                     const ParameterCount& actual,
   1918                                     InvokeFlag flag) {
   1919   ASSERT(function->is_compiled());
   1920   // Get the function and setup the context.
   1921   Move(rdi, Handle<JSFunction>(function));
   1922   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   1923 
   1924   // Invoke the cached code.
   1925   Handle<Code> code(function->code());
   1926   ParameterCount expected(function->shared()->formal_parameter_count());
   1927   InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
   1928 }
   1929 
   1930 
   1931 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   1932   push(rbp);
   1933   movq(rbp, rsp);
   1934   push(rsi);  // Context.
   1935   Push(Smi::FromInt(type));
   1936   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   1937   push(kScratchRegister);
   1938   if (FLAG_debug_code) {
   1939     movq(kScratchRegister,
   1940          Factory::undefined_value(),
   1941          RelocInfo::EMBEDDED_OBJECT);
   1942     cmpq(Operand(rsp, 0), kScratchRegister);
   1943     Check(not_equal, "code object not properly patched");
   1944   }
   1945 }
   1946 
   1947 
   1948 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   1949   if (FLAG_debug_code) {
   1950     Move(kScratchRegister, Smi::FromInt(type));
   1951     cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
   1952     Check(equal, "stack frame types must match");
   1953   }
   1954   movq(rsp, rbp);
   1955   pop(rbp);
   1956 }
   1957 
   1958 
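        // Explanatory note: on entry rax is expected to hold the argument count (it
        // is saved in r14 and used to compute argv); r15 receives argv and, being
        // callee-saved, survives the C call so LeaveExitFrame can use it to drop
        // the arguments.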
   1959 void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
   1960   // Set up the frame structure on the stack.
   1961   // All constants are relative to the frame pointer of the exit frame.
   1962   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   1963   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   1964   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   1965   push(rbp);
   1966   movq(rbp, rsp);
   1967 
   1968   // Reserve room for entry stack pointer and push the debug marker.
   1969   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   1970   push(Immediate(0));  // Saved entry sp, patched before call.
   1971   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   1972   push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
   1973 
   1974   // Save the frame pointer and the context in top.
   1975   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
   1976   ExternalReference context_address(Top::k_context_address);
   1977   movq(r14, rax);  // Backup rax before we use it.
   1978 
   1979   movq(rax, rbp);
   1980   store_rax(c_entry_fp_address);
   1981   movq(rax, rsi);
   1982   store_rax(context_address);
   1983 
   1984   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
   1985   // so it must be retained across the C-call.
   1986   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   1987   lea(r15, Operand(rbp, r14, times_pointer_size, offset));
   1988 
   1989 #ifdef ENABLE_DEBUGGER_SUPPORT
   1990   // Save the state of all registers to the stack from the memory
   1991   // location. This is needed to allow nested break points.
   1992   if (mode == ExitFrame::MODE_DEBUG) {
   1993     // TODO(1243899): This should be symmetric to
   1994     // CopyRegistersFromStackToMemory() but it isn't! rsp is assumed
   1995     // correct here, but computed for the other call. Very error
   1996     // prone! FIX THIS.  Actually there are deeper problems with
   1997     // register saving than this asymmetry (see the bug report
   1998     // associated with this issue).
   1999     PushRegistersFromMemory(kJSCallerSaved);
   2000   }
   2001 #endif
   2002 
   2003 #ifdef _WIN64
   2004   // Reserve space on stack for result and argument structures, if necessary.
   2005   int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
   2006   // Reserve space for the Arguments object.  The Windows 64-bit ABI
   2007   // requires us to pass this structure as a pointer to its location on
   2008   // the stack.  The structure contains 2 values.
   2009   int argument_stack_space = 2 * kPointerSize;
   2010   // We also need backing space for 4 parameters, even though
   2011   // we only pass one or two parameters, and they are in registers.
   2012   int argument_mirror_space = 4 * kPointerSize;
   2013   int total_stack_space =
   2014       argument_mirror_space + argument_stack_space + result_stack_space;
   2015   subq(rsp, Immediate(total_stack_space));
   2016 #endif
   2017 
   2018   // Get the required frame alignment for the OS.
   2019   static const int kFrameAlignment = OS::ActivationFrameAlignment();
   2020   if (kFrameAlignment > 0) {
   2021     ASSERT(IsPowerOf2(kFrameAlignment));
   2022     movq(kScratchRegister, Immediate(-kFrameAlignment));
   2023     and_(rsp, kScratchRegister);
   2024   }
   2025 
   2026   // Patch the saved entry sp.
   2027   movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
   2028 }
   2029 
   2030 
   2031 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
   2032   // Registers:
   2033   // r15 : argv
   2034 #ifdef ENABLE_DEBUGGER_SUPPORT
   2035   // Restore the memory copy of the registers by digging them out from
   2036   // the stack. This is needed to allow nested break points.
   2037   if (mode == ExitFrame::MODE_DEBUG) {
   2038     // It's okay to clobber register rbx below because we don't need
   2039     // the function pointer after this.
   2040     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
   2041     int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
   2042     lea(rbx, Operand(rbp, kOffset));
   2043     CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
   2044   }
   2045 #endif
   2046 
   2047   // Get the return address from the stack and restore the frame pointer.
   2048   movq(rcx, Operand(rbp, 1 * kPointerSize));
   2049   movq(rbp, Operand(rbp, 0 * kPointerSize));
   2050 
   2051   // Pop everything up to and including the arguments and the receiver
   2052   // from the caller stack.
   2053   lea(rsp, Operand(r15, 1 * kPointerSize));
   2054 
   2055   // Restore current context from top and clear it in debug mode.
   2056   ExternalReference context_address(Top::k_context_address);
   2057   movq(kScratchRegister, context_address);
   2058   movq(rsi, Operand(kScratchRegister, 0));
   2059 #ifdef DEBUG
   2060   movq(Operand(kScratchRegister, 0), Immediate(0));
   2061 #endif
   2062 
   2063   // Push the return address to get ready to return.
   2064   push(rcx);
   2065 
   2066   // Clear the top frame.
   2067   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
   2068   movq(kScratchRegister, c_entry_fp_address);
   2069   movq(Operand(kScratchRegister, 0), Immediate(0));
   2070 }
   2071 
   2072 
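        // Explanatory note: CheckMaps emits one map check per object on the
        // prototype chain from 'object' to 'holder', jumping to 'miss' on any
        // mismatch, and returns the register that ends up holding the holder
        // (object_reg if object == holder, holder_reg otherwise).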
   2073 Register MacroAssembler::CheckMaps(JSObject* object,
   2074                                    Register object_reg,
   2075                                    JSObject* holder,
   2076                                    Register holder_reg,
   2077                                    Register scratch,
   2078                                    Label* miss) {
   2079   // Make sure there's no overlap between scratch and the other
   2080   // registers.
   2081   ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
   2082 
   2083   // Keep track of the current object in register reg.  On the first
   2084   // iteration, reg is an alias for object_reg; on later iterations,
   2085   // it is an alias for holder_reg.
   2086   Register reg = object_reg;
   2087   int depth = 1;
   2088 
   2089   // Check the maps in the prototype chain.
   2090   // Traverse the prototype chain from the object and do map checks.
   2091   while (object != holder) {
   2092     depth++;
   2093 
   2094     // Only global objects and objects that do not require access
   2095     // checks are allowed in stubs.
   2096     ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
   2097 
   2098     JSObject* prototype = JSObject::cast(object->GetPrototype());
   2099     if (Heap::InNewSpace(prototype)) {
   2100       // Get the map of the current object.
   2101       movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
   2102       Cmp(scratch, Handle<Map>(object->map()));
   2103       // Branch on the result of the map check.
   2104       j(not_equal, miss);
   2105       // Check access rights to the global object.  This has to happen
   2106       // after the map check so that we know that the object is
   2107       // actually a global object.
   2108       if (object->IsJSGlobalProxy()) {
   2109         CheckAccessGlobalProxy(reg, scratch, miss);
   2110 
   2111         // Restore scratch register to be the map of the object.
   2112         // We load the prototype from the map in the scratch register.
   2113         movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
   2114       }
   2115       // The prototype is in new space; we cannot store a reference
   2116       // to it in the code. Load it from the map.
   2117       reg = holder_reg;  // from now the object is in holder_reg
   2118       movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
   2119 
   2120     } else {
   2121       // Check the map of the current object.
   2122       Cmp(FieldOperand(reg, HeapObject::kMapOffset),
   2123           Handle<Map>(object->map()));
   2124       // Branch on the result of the map check.
   2125       j(not_equal, miss);
   2126       // Check access rights to the global object.  This has to happen
   2127       // after the map check so that we know that the object is
   2128       // actually a global object.
   2129       if (object->IsJSGlobalProxy()) {
   2130         CheckAccessGlobalProxy(reg, scratch, miss);
   2131       }
   2132       // The prototype is in old space; load it directly.
   2133       reg = holder_reg;  // from now the object is in holder_reg
   2134       Move(reg, Handle<JSObject>(prototype));
   2135     }
   2136 
   2137     // Go to the next object in the prototype chain.
   2138     object = prototype;
   2139   }
   2140 
   2141   // Check the holder map.
   2142   Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
   2143   j(not_equal, miss);
   2144 
   2145   // Log the check depth.
   2146   LOG(IntEvent("check-maps-depth", depth));
   2147 
   2148   // Perform security check for access to the global object and return
   2149   // the holder register.
   2150   ASSERT(object == holder);
   2151   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
   2152   if (object->IsJSGlobalProxy()) {
   2153     CheckAccessGlobalProxy(reg, scratch, miss);
   2154   }
   2155   return reg;
   2156 }
   2157 
   2158 
   2159 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   2160                                             Register scratch,
   2161                                             Label* miss) {
   2162   Label same_contexts;
   2163 
   2164   ASSERT(!holder_reg.is(scratch));
   2165   ASSERT(!scratch.is(kScratchRegister));
   2166   // Load current lexical context from the stack frame.
   2167   movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
   2168 
   2169   // When generating debug code, make sure the lexical context is set.
   2170   if (FLAG_debug_code) {
   2171     cmpq(scratch, Immediate(0));
   2172     Check(not_equal, "we should not have an empty lexical context");
   2173   }
   2174   // Load the global context of the current context.
   2175   int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   2176   movq(scratch, FieldOperand(scratch, offset));
   2177   movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
   2178 
   2179   // Check the context is a global context.
   2180   if (FLAG_debug_code) {
   2181     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
   2182         Factory::global_context_map());
   2183     Check(equal, "JSGlobalObject::global_context should be a global context.");
   2184   }
   2185 
   2186   // Check if both contexts are the same.
   2187   cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
   2188   j(equal, &same_contexts);
   2189 
   2190   // Compare security tokens.
   2191   // Check that the security token in the calling global object is
   2192   // compatible with the security token in the receiving global
   2193   // object.
   2194 
   2195   // Check the context is a global context.
   2196   if (FLAG_debug_code) {
   2197     // Preserve original value of holder_reg.
   2198     push(holder_reg);
   2199     movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
   2200     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
   2201     Check(not_equal, "JSGlobalProxy::context() should not be null.");
   2202 
   2203     // Read the first word and compare it to global_context_map().
   2204     movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
   2205     CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
   2206     Check(equal, "JSGlobalObject::global_context should be a global context.");
   2207     pop(holder_reg);
   2208   }
   2209 
   2210   movq(kScratchRegister,
   2211        FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
   2212   int token_offset =
   2213       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   2214   movq(scratch, FieldOperand(scratch, token_offset));
   2215   cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
   2216   j(not_equal, miss);
   2217 
   2218   bind(&same_contexts);
   2219 }
   2220 
   2221 
   2222 void MacroAssembler::LoadAllocationTopHelper(Register result,
   2223                                              Register result_end,
   2224                                              Register scratch,
   2225                                              AllocationFlags flags) {
   2226   ExternalReference new_space_allocation_top =
   2227       ExternalReference::new_space_allocation_top_address();
   2228 
   2229   // Just return if allocation top is already known.
   2230   if ((flags & RESULT_CONTAINS_TOP) != 0) {
   2231     // No use of scratch if allocation top is provided.
   2232     ASSERT(scratch.is(no_reg));
   2233 #ifdef DEBUG
   2234     // Assert that result actually contains top on entry.
   2235     movq(kScratchRegister, new_space_allocation_top);
   2236     cmpq(result, Operand(kScratchRegister, 0));
   2237     Check(equal, "Unexpected allocation top");
   2238 #endif
   2239     return;
   2240   }
   2241 
   2242   // Move address of new object to result. Use scratch register if available.
   2243   if (scratch.is(no_reg)) {
   2244     movq(kScratchRegister, new_space_allocation_top);
   2245     movq(result, Operand(kScratchRegister, 0));
   2246   } else {
   2247     ASSERT(!scratch.is(result_end));
   2248     movq(scratch, new_space_allocation_top);
   2249     movq(result, Operand(scratch, 0));
   2250   }
   2251 }
   2252 
   2253 
   2254 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
   2255                                                Register scratch) {
   2256   if (FLAG_debug_code) {
   2257     testq(result_end, Immediate(kObjectAlignmentMask));
   2258     Check(zero, "Unaligned allocation in new space");
   2259   }
   2260 
   2261   ExternalReference new_space_allocation_top =
   2262       ExternalReference::new_space_allocation_top_address();
   2263 
   2264   // Update new top.
   2265   if (result_end.is(rax)) {
   2266     // rax can be stored directly to a memory location.
   2267     store_rax(new_space_allocation_top);
   2268   } else {
   2269     // Register required - use scratch provided if available.
   2270     if (scratch.is(no_reg)) {
   2271       movq(kScratchRegister, new_space_allocation_top);
   2272       movq(Operand(kScratchRegister, 0), result_end);
   2273     } else {
   2274       movq(Operand(scratch, 0), result_end);
   2275     }
   2276   }
   2277 }
   2278 
   2279 
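        // Explanatory note: the AllocateInNewSpace variants below implement
        // bump-pointer allocation. They load the current new-space top into result,
        // compute the end of the new object in result_end, jump to gc_required if
        // that exceeds the allocation limit, and otherwise write result_end back as
        // the new top. With TAG_OBJECT the result gets the heap object tag added.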
   2280 void MacroAssembler::AllocateInNewSpace(int object_size,
   2281                                         Register result,
   2282                                         Register result_end,
   2283                                         Register scratch,
   2284                                         Label* gc_required,
   2285                                         AllocationFlags flags) {
   2286   ASSERT(!result.is(result_end));
   2287 
   2288   // Load address of new object into result.
   2289   LoadAllocationTopHelper(result, result_end, scratch, flags);
   2290 
   2291   // Calculate new top and bail out if new space is exhausted.
   2292   ExternalReference new_space_allocation_limit =
   2293       ExternalReference::new_space_allocation_limit_address();
   2294   lea(result_end, Operand(result, object_size));
   2295   movq(kScratchRegister, new_space_allocation_limit);
   2296   cmpq(result_end, Operand(kScratchRegister, 0));
   2297   j(above, gc_required);
   2298 
   2299   // Update allocation top.
   2300   UpdateAllocationTopHelper(result_end, scratch);
   2301 
   2302   // Tag the result if requested.
   2303   if ((flags & TAG_OBJECT) != 0) {
   2304     addq(result, Immediate(kHeapObjectTag));
   2305   }
   2306 }
   2307 
   2308 
   2309 void MacroAssembler::AllocateInNewSpace(int header_size,
   2310                                         ScaleFactor element_size,
   2311                                         Register element_count,
   2312                                         Register result,
   2313                                         Register result_end,
   2314                                         Register scratch,
   2315                                         Label* gc_required,
   2316                                         AllocationFlags flags) {
   2317   ASSERT(!result.is(result_end));
   2318 
   2319   // Load address of new object into result.
   2320   LoadAllocationTopHelper(result, result_end, scratch, flags);
   2321 
   2322   // Calculate new top and bail out if new space is exhausted.
   2323   ExternalReference new_space_allocation_limit =
   2324       ExternalReference::new_space_allocation_limit_address();
   2325   lea(result_end, Operand(result, element_count, element_size, header_size));
   2326   movq(kScratchRegister, new_space_allocation_limit);
   2327   cmpq(result_end, Operand(kScratchRegister, 0));
   2328   j(above, gc_required);
   2329 
   2330   // Update allocation top.
   2331   UpdateAllocationTopHelper(result_end, scratch);
   2332 
   2333   // Tag the result if requested.
   2334   if ((flags & TAG_OBJECT) != 0) {
   2335     addq(result, Immediate(kHeapObjectTag));
   2336   }
   2337 }
   2338 
   2339 
   2340 void MacroAssembler::AllocateInNewSpace(Register object_size,
   2341                                         Register result,
   2342                                         Register result_end,
   2343                                         Register scratch,
   2344                                         Label* gc_required,
   2345                                         AllocationFlags flags) {
   2346   // Load address of new object into result.
   2347   LoadAllocationTopHelper(result, result_end, scratch, flags);
   2348 
   2349   // Calculate new top and bail out if new space is exhausted.
   2350   ExternalReference new_space_allocation_limit =
   2351       ExternalReference::new_space_allocation_limit_address();
   2352   if (!object_size.is(result_end)) {
   2353     movq(result_end, object_size);
   2354   }
   2355   addq(result_end, result);
   2356   movq(kScratchRegister, new_space_allocation_limit);
   2357   cmpq(result_end, Operand(kScratchRegister, 0));
   2358   j(above, gc_required);
   2359 
   2360   // Update allocation top.
   2361   UpdateAllocationTopHelper(result_end, scratch);
   2362 
   2363   // Tag the result if requested.
   2364   if ((flags & TAG_OBJECT) != 0) {
   2365     addq(result, Immediate(kHeapObjectTag));
   2366   }
   2367 }
   2368 
   2369 
   2370 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
   2371   ExternalReference new_space_allocation_top =
   2372       ExternalReference::new_space_allocation_top_address();
   2373 
   2374   // Make sure the object has no tag before resetting top.
   2375   and_(object, Immediate(~kHeapObjectTagMask));
   2376   movq(kScratchRegister, new_space_allocation_top);
   2377 #ifdef DEBUG
   2378   cmpq(object, Operand(kScratchRegister, 0));
   2379   Check(below, "Undo allocation of non allocated memory");
   2380 #endif
   2381   movq(Operand(kScratchRegister, 0), object);
   2382 }
   2383 
   2384 
   2385 void MacroAssembler::AllocateHeapNumber(Register result,
   2386                                         Register scratch,
   2387                                         Label* gc_required) {
   2388   // Allocate heap number in new space.
   2389   AllocateInNewSpace(HeapNumber::kSize,
   2390                      result,
   2391                      scratch,
   2392                      no_reg,
   2393                      gc_required,
   2394                      TAG_OBJECT);
   2395 
   2396   // Set the map.
   2397   LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
   2398   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   2399 }
   2400 
   2401 
   2402 void MacroAssembler::AllocateTwoByteString(Register result,
   2403                                            Register length,
   2404                                            Register scratch1,
   2405                                            Register scratch2,
   2406                                            Register scratch3,
   2407                                            Label* gc_required) {
   2408   // Calculate the number of bytes needed for the characters in the string while
   2409   // observing object alignment.
   2410   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   2411   ASSERT(kShortSize == 2);
   2412   // scratch1 = length * 2 + kObjectAlignmentMask.
   2413   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
   2414   and_(scratch1, Immediate(~kObjectAlignmentMask));
   2415 
   2416   // Allocate two byte string in new space.
   2417   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
   2418                      times_1,
   2419                      scratch1,
   2420                      result,
   2421                      scratch2,
   2422                      scratch3,
   2423                      gc_required,
   2424                      TAG_OBJECT);
   2425 
   2426   // Set the map, length and hash field.
   2427   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
   2428   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   2429   movl(FieldOperand(result, String::kLengthOffset), length);
   2430   movl(FieldOperand(result, String::kHashFieldOffset),
   2431        Immediate(String::kEmptyHashField));
   2432 }
   2433 
   2434 
   2435 void MacroAssembler::AllocateAsciiString(Register result,
   2436                                          Register length,
   2437                                          Register scratch1,
   2438                                          Register scratch2,
   2439                                          Register scratch3,
   2440                                          Label* gc_required) {
   2441   // Calculate the number of bytes needed for the characters in the string while
   2442   // observing object alignment.
   2443   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   2444   movl(scratch1, length);
   2445   ASSERT(kCharSize == 1);
   2446   addq(scratch1, Immediate(kObjectAlignmentMask));
   2447   and_(scratch1, Immediate(~kObjectAlignmentMask));
   2448 
   2449   // Allocate ascii string in new space.
   2450   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
   2451                      times_1,
   2452                      scratch1,
   2453                      result,
   2454                      scratch2,
   2455                      scratch3,
   2456                      gc_required,
   2457                      TAG_OBJECT);
   2458 
   2459   // Set the map, length and hash field.
   2460   LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
   2461   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   2462   movl(FieldOperand(result, String::kLengthOffset), length);
   2463   movl(FieldOperand(result, String::kHashFieldOffset),
   2464        Immediate(String::kEmptyHashField));
   2465 }
   2466 
   2467 
   2468 void MacroAssembler::AllocateConsString(Register result,
   2469                                         Register scratch1,
   2470                                         Register scratch2,
   2471                                         Label* gc_required) {
   2472   // Allocate the cons string object in new space.
   2473   AllocateInNewSpace(ConsString::kSize,
   2474                      result,
   2475                      scratch1,
   2476                      scratch2,
   2477                      gc_required,
   2478                      TAG_OBJECT);
   2479 
   2480   // Set the map. The other fields are left uninitialized.
   2481   LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
   2482   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   2483 }
   2484 
   2485 
   2486 void MacroAssembler::AllocateAsciiConsString(Register result,
   2487                                              Register scratch1,
   2488                                              Register scratch2,
   2489                                              Label* gc_required) {
   2490   // Allocate the ASCII cons string object in new space.
   2491   AllocateInNewSpace(ConsString::kSize,
   2492                      result,
   2493                      scratch1,
   2494                      scratch2,
   2495                      gc_required,
   2496                      TAG_OBJECT);
   2497 
   2498   // Set the map. The other fields are left uninitialized.
   2499   LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
   2500   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   2501 }
   2502 
   2503 
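        // Explanatory note: context_chain_length == 0 loads the current function
        // context; each extra level loads the enclosing closure (CLOSURE_INDEX) and
        // then that closure's context, before the final FCONTEXT_INDEX load maps a
        // possibly intermediate context back to its function context.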
   2504 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   2505   if (context_chain_length > 0) {
   2506     // Move up the chain of contexts to the context containing the slot.
   2507     movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
   2508     // Load the function context (which is the incoming, outer context).
   2509     movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
   2510     for (int i = 1; i < context_chain_length; i++) {
   2511       movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
   2512       movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
   2513     }
   2514     // The context may be an intermediate context, not a function context.
   2515     movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   2516   } else {  // context is the current function context.
   2517     // The context may be an intermediate context, not a function context.
   2518     movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   2519   }
   2520 }
   2521 
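        // For example, with num_arguments == 8 the function below returns 8 on
        // Windows (every argument gets a caller-reserved slot) and 2 on Linux (only
        // the arguments beyond the 6 register arguments need stack slots).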
   2522 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
   2523   // On Windows, stack slots are reserved by the caller for all arguments,
   2524   // including the ones passed in registers. On Linux, the first 6 arguments
   2525   // are passed in registers and the caller reserves no stack slots for them.
   2526   ASSERT(num_arguments >= 0);
   2527 #ifdef _WIN64
   2528   static const int kArgumentsWithoutStackSlot = 0;
   2529 #else
   2530   static const int kArgumentsWithoutStackSlot = 6;
   2531 #endif
   2532   return num_arguments > kArgumentsWithoutStackSlot ?
   2533       num_arguments - kArgumentsWithoutStackSlot : 0;
   2534 }
   2535 
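        // A minimal usage sketch (argument registers depend on the target ABI):
        //   PrepareCallCFunction(2);
        //   ... move the two arguments into the first two ABI argument registers ...
        //   CallCFunction(ExternalReference(...), 2);
        // PrepareCallCFunction stores the incoming rsp just above the reserved
        // argument slots, and CallCFunction restores it after the call, so the two
        // must be paired with the same argument count.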
   2536 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
   2537   int frame_alignment = OS::ActivationFrameAlignment();
   2538   ASSERT(frame_alignment != 0);
   2539   ASSERT(num_arguments >= 0);
   2540   // Align the stack and allocate space for the arguments and the old rsp.
   2541   movq(kScratchRegister, rsp);
   2542   ASSERT(IsPowerOf2(frame_alignment));
   2543   int argument_slots_on_stack =
   2544       ArgumentStackSlotsForCFunctionCall(num_arguments);
   2545   subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
   2546   and_(rsp, Immediate(-frame_alignment));
   2547   movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
   2548 }
   2549 
   2550 
   2551 void MacroAssembler::CallCFunction(ExternalReference function,
   2552                                    int num_arguments) {
   2553   movq(rax, function);
   2554   CallCFunction(rax, num_arguments);
   2555 }
   2556 
   2557 
   2558 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   2559   call(function);
   2560   ASSERT(OS::ActivationFrameAlignment() != 0);
   2561   ASSERT(num_arguments >= 0);
   2562   int argument_slots_on_stack =
   2563       ArgumentStackSlotsForCFunctionCall(num_arguments);
   2564   movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
   2565 }
   2566 
   2567 
   2568 CodePatcher::CodePatcher(byte* address, int size)
   2569     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   2570   // Create a new macro assembler pointing to the address of the code to patch.
   2571   // The size is adjusted with kGap in order for the assembler to generate size
   2572   // bytes of instructions without failing with buffer size constraints.
   2573   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   2574 }
   2575 
   2576 
   2577 CodePatcher::~CodePatcher() {
   2578   // Indicate that code has changed.
   2579   CPU::FlushICache(address_, size_);
   2580 
   2581   // Check that the code was patched as expected.
   2582   ASSERT(masm_.pc_ == address_ + size_);
   2583   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   2584 }
   2585 
   2586 } }  // namespace v8::internal
   2587