// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "bootstrapper.h"
#include "codegen.h"
#include "cpu-profiler.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"
#include "heap.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


static const int kInvalidRootRegisterDelta = -1;


intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
  intptr_t delta = other.address() - roots_register_value;
  return delta;
}


Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  movq(scratch, target);
  return Operand(scratch, 0);
}


void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    movq(kScratchRegister, source);
    movq(destination, Operand(kScratchRegister, 0));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    movq(kScratchRegister, destination);
    movq(Operand(kScratchRegister, 0), source);
  }
}


void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  movq(destination, source);
}

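// Usage sketch (added commentary, not part of the original file): the helpers
// above let callers touch an ExternalReference either through a
// kRootRegister-relative operand (when the reference sits near the isolate's
// root array and we are not serializing) or through kScratchRegister as a
// fallback. Assuming some ExternalReference `ref` is at hand, a caller might
// write:
//
//   masm->Load(rax, ref);          // rax = *ref
//   masm->Store(ref, rdx);         // *ref = rdx
//   masm->LoadAddress(rcx, ref);   // rcx = ref (the address itself)
//
// Whether the fast kRootRegister path is taken is an encoding detail; the
// observable effect is the same on both paths.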

int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    intptr_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      // Operand is lea(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movq(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}

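// Worked example (added for clarity, not in the original): on the fast path
// the lea encodes as REX.W (1 byte) + opcode 8D (1 byte) + ModRM (1 byte) +
// displacement. An int8 delta takes a 1-byte displacement, giving
// 1 + 1 + 1 + 1 = 4 bytes; a wider delta takes a 4-byte displacement, giving
// 1 + 1 + 1 + 4 = 7 bytes, hence the `size += 3` above.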

void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !Serializer::enabled()) {
    if (emit_debug_code()) {
      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
    }
    push(Immediate(static_cast<int32_t>(address)));
    return;
  }
  LoadAddress(kScratchRegister, source);
  push(kScratchRegister);
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movq(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}

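// Usage sketch (added, not in the original): the roots live in a fixed array
// that kRootRegister points into with a bias, so a root access is a single
// register-relative instruction. For example, loading and then comparing
// against the undefined sentinel:
//
//   masm->LoadRoot(rax, Heap::kUndefinedValueRootIndex);
//   masm->CompareRoot(rax, Heap::kUndefinedValueRootIndex);  // sets ZF
//
// StoreRoot/PushRoot/CompareRoot below use the same
// (index << kPointerSizeLog2) - kRootRegisterBias addressing.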

void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  ASSERT(root_array_available_);
  movq(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  cmpq(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpq(with, kScratchRegister);
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movq(Operand(scratch, 0), addr);
  // Increment buffer top.
  addq(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}

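// Sketch of the store buffer protocol above (added commentary, not original):
// the remembered set is a bump-pointer buffer whose top pointer lives in the
// root list, so each recorded slot address is one append:
//
//   *top = addr;  top += kPointerSize;
//
// The buffer is laid out so that StoreBuffer::kStoreBufferOverflowBit becomes
// set in the incremented top when the buffer fills, which is what the testq
// checks before either falling through or calling StoreBufferOverflowStub.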

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (Serializer::enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address.  We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      and_(scratch, kScratchRegister);
    } else {
      movq(scratch, ExternalReference::new_space_mask(isolate()));
      and_(scratch, object);
    }
    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpq(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
    if (scratch.is(object)) {
      addq(scratch, kScratchRegister);
    } else {
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
    j(cc, branch, distance);
  }
}

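// Worked arithmetic for the non-serializing path above (added, not original):
// new space occupies a single aligned region, so membership reduces to
//
//   in_new_space(p)  <=>  ((p - new_space_start) & NewSpaceMask()) == 0
//
// The code computes p + (-new_space_start) with addq/lea, masks the result,
// and lets the caller's condition `cc` consume the resulting flags (e.g.
// `equal` to branch when the object is in new space).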

void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!value.is(rsi) && !dst.is(rsi));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
  }
}


void MacroAssembler::RecordWriteArray(Register object,
                                      Register value,
                                      Register index,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  lea(dst, Operand(object, index, times_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
  }
}


void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!value.is(rsi) && !address.is(rsi));

  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmpq(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
  }
}

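// Usage sketch (added, not part of the original): a typical barriered field
// store pairs the movq with RecordWriteField, e.g. storing `value` into a
// heap object's field at `offset`, with rdx as the slot-address scratch
// (EMIT_REMEMBERED_SET is assumed here as the counterpart of the
// OMIT_REMEMBERED_SET action used above):
//
//   masm->movq(FieldOperand(rbx, offset), value);
//   masm->RecordWriteField(rbx, offset, value, rdx, kSaveFPRegs,
//                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
//
// The two CheckPageFlag tests in RecordWrite filter out the common cases
// (value not interesting, object page not being tracked) before the stub
// call.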

void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
  // We want to pass the msg string like a smi to avoid GC problems;
  // however, msg is not guaranteed to be aligned properly. Instead, we
  // pass an aligned pointer that is a proper v8 smi, but also pass the
  // alignment difference from the real pointer as a smi.
  const char* msg = GetBailoutReason(reason);
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  push(rax);
  movq(kScratchRegister, p0, RelocInfo::NONE64);
  push(kScratchRegister);
  movq(kScratchRegister,
       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
       RelocInfo::NONE64);
  push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // Control will not return here.
  int3();
}

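// Worked example for the smi-encoding in Abort (added, not original): with
// kSmiTagMask == 1 and kSmiTag == 0, an odd message pointer p1 == ...0d is
// split into p0 == ...0c (p1 with the low tag bit cleared, so it carries a
// valid smi tag) plus the difference p1 - p0 == 1, passed separately as
// Smi::FromInt(1). The runtime reconstructs the real pointer as
// p0 + difference.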

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls_ ||
         stub->CompilingCallsToThisStubIsGCSafe(isolate()));
  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addq(rsp, Immediate(num_arguments * kPointerSize));
  }
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. Even if we subsequently go to
  // the slow case, converting the key to a smi is always valid.
  // key: string key
  // hash: key's hash field, including its array index value.
  and_(hash, Immediate(String::kArrayIndexValueMask));
  shr(hash, Immediate(String::kHashShift));
  // Here we actually clobber the key which will be used if calling into
  // runtime later. However, as the new key is the numeric value of a string
  // key, there is no difference in using either key.
  Integer32ToSmi(index, hash);
}

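// Worked example (added, not original): for a string that caches array index
// 42 in its hash field, the index bits sit above String::kHashShift with the
// flag bits below them. Masking with kArrayIndexValueMask isolates those
// bits, the shr recovers the raw 42, and Integer32ToSmi tags it:
//
//   masm->IndexFromHash(rbx, rax);  // rbx holds the hash; rax = Smi(42)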

void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(id), num_arguments);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  Set(rax, function->nargs);
  LoadAddress(rbx, ExternalReference(function, isolate()));
  CEntryStub ces(1, kSaveFPRegs);
  CallStub(&ces);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(f->result_size);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}

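// Calling-convention note with a sketch (added, not original): all the
// runtime entries above funnel into CEntryStub with the argument count in
// rax and the C function's address in rbx; the JS arguments themselves are
// already on the stack. Assuming `receiver` and `key` name registers holding
// the values to pass, a call site looks like:
//
//   masm->push(receiver);                   // argument 0
//   masm->push(key);                        // argument 1
//   masm->CallRuntime(Runtime::kAbort, 2);  // rax = 2, rbx = entry address
//
// (Runtime::kAbort is used only because it appears elsewhere in this file;
// any Runtime::FunctionId follows the same convention.)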

static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  ASSERT(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


void MacroAssembler::PrepareCallApiFunction(int arg_stack_space,
                                            bool returns_handle) {
#if defined(_WIN64) && !defined(__MINGW64__)
  if (!returns_handle) {
    EnterApiExitFrame(arg_stack_space);
    return;
  }
  // We need to prepare a slot for the result handle on the stack and put
  // a pointer to it into the first argument register.
  EnterApiExitFrame(arg_stack_space + 1);

  // rcx must be used to pass the pointer to the return value slot.
  lea(rcx, StackSpaceOperand(arg_stack_space));
#else
  EnterApiExitFrame(arg_stack_space);
#endif
}


void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
                                              Address thunk_address,
                                              Register thunk_last_arg,
                                              int stack_space,
                                              bool returns_handle,
                                              int return_value_offset) {
  Label prologue;
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label write_back;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  movq(base_reg, next_address);
  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }


  Label profiler_disabled;
  Label end_profiler_check;
  bool* is_profiling_flag =
      isolate()->cpu_profiler()->is_profiling_address();
  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
  movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
  cmpb(Operand(rax, 0), Immediate(0));
  j(zero, &profiler_disabled);

  // Third parameter is the address of the actual getter function.
  movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
  movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Load the address of the API function into rax.
  movq(rax, reinterpret_cast<int64_t>(function_address),
       RelocInfo::EXTERNAL_REFERENCE);

  bind(&end_profiler_check);

  // Call the api function!
  call(rax);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Can skip the result check for new-style callbacks.
  // TODO(dcarney): may need to pass this information down,
  // as some function_addresses might not have been registered.
  if (returns_handle) {
    Label empty_result;
#if defined(_WIN64) && !defined(__MINGW64__)
    // rax keeps a pointer to v8::Handle, unpack it.
    movq(rax, Operand(rax, 0));
#endif
    // Check if the result handle holds 0.
    testq(rax, rax);
    j(zero, &empty_result);
    // It was non-zero.  Dereference to get the result value.
    movq(rax, Operand(rax, 0));
    jmp(&prologue);
    bind(&empty_result);
  }
  // Load the value from ReturnValue.
  movq(rax, Operand(rbp, return_value_offset * kPointerSize));
  bind(&prologue);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  movq(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = rax;
  Register map = rcx;

  JumpIfSmi(return_value, &ok, Label::kNear);
  movq(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kTrueValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kFalseValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kNullValueRootIndex);
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bind(&ok);
#endif

  LeaveApiExitFrame();
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movq(prev_limit_reg, rax);
  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
  LoadAddress(rax,
              ExternalReference::delete_handle_scope_extensions(isolate()));
  call(rax);
  movq(rax, prev_limit_reg);
  jmp(&leave_exit_frame);
}

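// Bookkeeping sketch for the function above (added commentary, not
// original): the isolate's HandleScope data is a set of fields reachable
// from handle_scope_next_address, addressed here as base_reg plus
// {kNextOffset, kLimitOffset, kLevelOffset}. The sequence brackets the API
// call roughly as:
//
//   level++                               // on entry
//   ... call through rax ...
//   level--; next = saved_next            // on exit
//   if (limit != saved_limit) delete_handle_scope_extensions()
//
// so handles the callee allocated are released, and extension blocks are
// freed only when the limit actually moved.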

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(result_size);
  jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movq(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}


#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      push(reg);
    }
  }
  // r12 to r15 are callee-saved on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}


void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pop(reg);
    }
  }
}

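// Usage sketch (added, not original): Push/PopCallerSaved must be used as a
// strict bracket with matching fp_mode and exclusions, since PopCallerSaved
// pops in exactly the reverse order of the pushes (the unused exclusions are
// assumed to default to no_reg in the header):
//
//   masm->PushCallerSaved(kSaveFPRegs, rax);  // rax excluded, e.g. a result
//   // ... code that may clobber caller-saved registers ...
//   masm->PopCallerSaved(kSaveFPRegs, rax);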

void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x, RelocInfo::NONE64);
  }
}

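// Encoding-size note with examples (added, not original): Set picks the
// shortest usable encoding for the constant:
//
//   masm->Set(rax, 0);            // xorl rax, rax: no immediate at all
//   masm->Set(rax, 0x80000000);   // movl: uint32 immediate, zero-extends
//   masm->Set(rax, -1);           // movq with sign-extended int32 immediate
//   masm->Set(rax, 1LL << 40);    // full 64-bit movq immediate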

void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    Set(kScratchRegister, x);
    movq(dst, kScratchRegister);
  }
}


bool MacroAssembler::IsUnsafeInt(const int x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}


void MacroAssembler::SafeMove(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
    Move(kScratchRegister, Smi::FromInt(jit_cookie()));
    xor_(dst, kScratchRegister);
  } else {
    Move(dst, src);
  }
}


void MacroAssembler::SafePush(Smi* src) {
  ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    Push(Smi::FromInt(src->value() ^ jit_cookie()));
    Move(kScratchRegister, Smi::FromInt(jit_cookie()));
    xor_(Operand(rsp, 0), kScratchRegister);
  } else {
    Push(src);
  }
}

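// Why the XOR dance above (added commentary, not original): embedding an
// attacker-chosen 32-bit constant verbatim in the instruction stream enables
// JIT spraying. SafeMove/SafePush instead emit value ^ cookie and XOR with
// the cookie at runtime, reconstructing the value without it ever appearing
// literally in code:
//
//   (value ^ cookie) ^ cookie == value
//
// Small (under 17-bit) constants are considered harmless and skip the
// scrambling, as IsUnsafeInt encodes.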

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}


void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    movq(dst,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE64);
    cmpq(dst, kSmiConstantRegister);
    if (allow_stub_calls()) {
      Assert(equal, kUninitializedKSmiConstantRegister);
    } else {
      Label ok;
      j(equal, &ok, Label::kNear);
      int3();
      bind(&ok);
    }
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movq(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
      return;
  }
  if (negative) {
    neg(dst);
  }
}

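// How the lea tricks above work (added, not original): kSmiConstantRegister
// permanently holds Smi::FromInt(1), so small multiples come out of a single
// lea using base + index * scale. For example, for value 9:
//
//   lea dst, [kSmiConstantRegister + kSmiConstantRegister * 8]
//     == Smi(1) + Smi(1) * 8 == Smi(9)
//
// Negative values reuse the positive sequence and finish with neg(dst).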

void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shl(dst, Immediate(kSmiShift));
}

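// Tagging sketch (added, not original): on x64 a smi keeps its 32-bit value
// in the upper half of the word (kSmiShift == 32, per the kSmiValueSize
// asserts above), so tagging is a left shift and untagging the matching
// right shift:
//
//   masm->Integer32ToSmi(rax, rax);  // rax = n  ->  rax = n << 32
//   masm->SmiToInteger32(rax, rax);  // logical shift right, back to n
//
// SmiToInteger64 below uses an arithmetic shift instead, so the sign
// survives untagging.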

void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    if (allow_stub_calls()) {
      Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    } else {
      int3();
    }
    bind(&ok);
  }
  ASSERT(kSmiShift % kBitsPerByte == 0);
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  shr(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  sar(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testq(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpq(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testq(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpq(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  ASSERT(!dst.AddressUsesRegister(smi_reg));
  cmpq(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (power < kSmiShift) {
    sar(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shl(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  ASSERT((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shr(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movq(kScratchRegister, src1);
    or_(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    or_(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movq(kScratchRegister, src);
  rol(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}

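// Bit trick above, worked out (added, not original): a value is a
// non-negative smi iff bit 63 (the sign) and bit 0 (the tag) are both zero.
// Rotating left by one wraps the sign bit into bit 0 and moves the tag bit
// into bit 1, so a single testb against 0x3 checks both at once:
//
//   before rol: s......t   (s = sign bit, t = tag bit)
//   after  rol: ......ts
//   zero flag set  <=>  s == 0 && t == 0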
   1297 
   1298 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
   1299   if (first.is(second)) {
   1300     return CheckSmi(first);
   1301   }
   1302   STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
   1303   leal(kScratchRegister, Operand(first, second, times_1, 0));
   1304   testb(kScratchRegister, Immediate(0x03));
   1305   return zero;
   1306 }
   1307 
   1308 
   1309 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
   1310                                                   Register second) {
   1311   if (first.is(second)) {
   1312     return CheckNonNegativeSmi(first);
   1313   }
   1314   movq(kScratchRegister, first);
   1315   or_(kScratchRegister, second);
   1316   rol(kScratchRegister, Immediate(1));
   1317   testl(kScratchRegister, Immediate(3));
   1318   return zero;
   1319 }
   1320 
   1321 
   1322 Condition MacroAssembler::CheckEitherSmi(Register first,
   1323                                          Register second,
   1324                                          Register scratch) {
   1325   if (first.is(second)) {
   1326     return CheckSmi(first);
   1327   }
   1328   if (scratch.is(second)) {
   1329     andl(scratch, first);
   1330   } else {
   1331     if (!scratch.is(first)) {
   1332       movl(scratch, first);
   1333     }
   1334     andl(scratch, second);
   1335   }
   1336   testb(scratch, Immediate(kSmiTagMask));
   1337   return zero;
   1338 }
   1339 
   1340 
   1341 Condition MacroAssembler::CheckIsMinSmi(Register src) {
   1342   ASSERT(!src.is(kScratchRegister));
   1343   // If we overflow by subtracting one, it's the minimal smi value.
   1344   cmpq(src, kSmiConstantRegister);
   1345   return overflow;
   1346 }
   1347 
   1348 
   1349 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
   1350   // A 32-bit integer value can always be converted to a smi.
   1351   return always;
   1352 }
   1353 
   1354 
   1355 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
   1356   // An unsigned 32-bit integer value is valid as long as the high bit
   1357   // is not set.
   1358   testl(src, src);
   1359   return positive;
   1360 }
   1361 
   1362 
   1363 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
   1364   if (dst.is(src)) {
   1365     andl(dst, Immediate(kSmiTagMask));
   1366   } else {
   1367     movl(dst, Immediate(kSmiTagMask));
   1368     andl(dst, src);
   1369   }
   1370 }
   1371 
   1372 
   1373 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
   1374   if (!(src.AddressUsesRegister(dst))) {
   1375     movl(dst, Immediate(kSmiTagMask));
   1376     andl(dst, src);
   1377   } else {
   1378     movl(dst, src);
   1379     andl(dst, Immediate(kSmiTagMask));
   1380   }
   1381 }
   1382 
   1383 
   1384 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
   1385                                             Label* on_invalid,
   1386                                             Label::Distance near_jump) {
   1387   Condition is_valid = CheckInteger32ValidSmiValue(src);
   1388   j(NegateCondition(is_valid), on_invalid, near_jump);
   1389 }
   1390 
   1391 
   1392 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
   1393                                                 Label* on_invalid,
   1394                                                 Label::Distance near_jump) {
   1395   Condition is_valid = CheckUInteger32ValidSmiValue(src);
   1396   j(NegateCondition(is_valid), on_invalid, near_jump);
   1397 }
   1398 
   1399 
   1400 void MacroAssembler::JumpIfSmi(Register src,
   1401                                Label* on_smi,
   1402                                Label::Distance near_jump) {
   1403   Condition smi = CheckSmi(src);
   1404   j(smi, on_smi, near_jump);
   1405 }
   1406 
   1407 
   1408 void MacroAssembler::JumpIfNotSmi(Register src,
   1409                                   Label* on_not_smi,
   1410                                   Label::Distance near_jump) {
   1411   Condition smi = CheckSmi(src);
   1412   j(NegateCondition(smi), on_not_smi, near_jump);
   1413 }
   1414 
   1415 
   1416 void MacroAssembler::JumpUnlessNonNegativeSmi(
   1417     Register src, Label* on_not_smi_or_negative,
   1418     Label::Distance near_jump) {
   1419   Condition non_negative_smi = CheckNonNegativeSmi(src);
   1420   j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
   1421 }
   1422 
   1423 
   1424 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
   1425                                              Smi* constant,
   1426                                              Label* on_equals,
   1427                                              Label::Distance near_jump) {
   1428   SmiCompare(src, constant);
   1429   j(equal, on_equals, near_jump);
   1430 }
   1431 
   1432 
   1433 void MacroAssembler::JumpIfNotBothSmi(Register src1,
   1434                                       Register src2,
   1435                                       Label* on_not_both_smi,
   1436                                       Label::Distance near_jump) {
   1437   Condition both_smi = CheckBothSmi(src1, src2);
   1438   j(NegateCondition(both_smi), on_not_both_smi, near_jump);
   1439 }
   1440 
   1441 
   1442 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
   1443                                                   Register src2,
   1444                                                   Label* on_not_both_smi,
   1445                                                   Label::Distance near_jump) {
   1446   Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
   1447   j(NegateCondition(both_smi), on_not_both_smi, near_jump);
   1448 }
   1449 
   1450 
   1451 void MacroAssembler::SmiTryAddConstant(Register dst,
   1452                                        Register src,
   1453                                        Smi* constant,
   1454                                        Label* on_not_smi_result,
   1455                                        Label::Distance near_jump) {
   1456   // Does not assume that src is a smi.
   1457   ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
   1458   STATIC_ASSERT(kSmiTag == 0);
   1459   ASSERT(!dst.is(kScratchRegister));
   1460   ASSERT(!src.is(kScratchRegister));
   1461 
   1462   JumpIfNotSmi(src, on_not_smi_result, near_jump);
   1463   Register tmp = (dst.is(src) ? kScratchRegister : dst);
   1464   LoadSmiConstant(tmp, constant);
   1465   addq(tmp, src);
   1466   j(overflow, on_not_smi_result, near_jump);
   1467   if (dst.is(src)) {
   1468     movq(dst, tmp);
   1469   }
   1470 }
   1471 
   1472 
   1473 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   1474   if (constant->value() == 0) {
   1475     if (!dst.is(src)) {
   1476       movq(dst, src);
   1477     }
   1478     return;
   1479   } else if (dst.is(src)) {
   1480     ASSERT(!dst.is(kScratchRegister));
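            // Worked sketch of the special cases below (grounded in case 1,
            // which shows kSmiConstantRegister holds Smi::FromInt(1), i.e.
            // 1 << kSmiShift). For constant 2, lea with scale times_2 computes
            //   dst = src + 2 * (1 << kSmiShift) = src + Smi(2),
            // so the tagged constants 1, 2, 4 and 8 can be added through the
            // addressing mode without loading them into a register first.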
   1481     switch (constant->value()) {
   1482       case 1:
   1483         addq(dst, kSmiConstantRegister);
   1484         return;
   1485       case 2:
   1486         lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
   1487         return;
   1488       case 4:
   1489         lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
   1490         return;
   1491       case 8:
   1492         lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
   1493         return;
   1494       default:
   1495         Register constant_reg = GetSmiConstant(constant);
   1496         addq(dst, constant_reg);
   1497         return;
   1498     }
   1499   } else {
   1500     switch (constant->value()) {
   1501       case 1:
   1502         lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
   1503         return;
   1504       case 2:
   1505         lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
   1506         return;
   1507       case 4:
   1508         lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
   1509         return;
   1510       case 8:
   1511         lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
   1512         return;
   1513       default:
   1514         LoadSmiConstant(dst, constant);
   1515         addq(dst, src);
   1516         return;
   1517     }
   1518   }
   1519 }
   1520 
   1521 
   1522 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   1523   if (constant->value() != 0) {
   1524     addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
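            // Sketch of why this works: with kSmiShift == 32 the smi payload
            // occupies the high 32 bits of the word, i.e. the four bytes at
            // offset kSmiShift / kBitsPerByte == 4 on little-endian x64, so a
            // 32-bit add of the untagged value there is a tagged smi add.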
   1525   }
   1526 }
   1527 
   1528 
   1529 void MacroAssembler::SmiAddConstant(Register dst,
   1530                                     Register src,
   1531                                     Smi* constant,
   1532                                     Label* on_not_smi_result,
   1533                                     Label::Distance near_jump) {
   1534   if (constant->value() == 0) {
   1535     if (!dst.is(src)) {
   1536       movq(dst, src);
   1537     }
   1538   } else if (dst.is(src)) {
   1539     ASSERT(!dst.is(kScratchRegister));
   1540 
   1541     LoadSmiConstant(kScratchRegister, constant);
   1542     addq(kScratchRegister, src);
   1543     j(overflow, on_not_smi_result, near_jump);
   1544     movq(dst, kScratchRegister);
   1545   } else {
   1546     LoadSmiConstant(dst, constant);
   1547     addq(dst, src);
   1548     j(overflow, on_not_smi_result, near_jump);
   1549   }
   1550 }
   1551 
   1552 
   1553 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   1554   if (constant->value() == 0) {
   1555     if (!dst.is(src)) {
   1556       movq(dst, src);
   1557     }
   1558   } else if (dst.is(src)) {
   1559     ASSERT(!dst.is(kScratchRegister));
   1560     Register constant_reg = GetSmiConstant(constant);
   1561     subq(dst, constant_reg);
   1562   } else {
   1563     if (constant->value() == Smi::kMinValue) {
   1564       LoadSmiConstant(dst, constant);
   1565       // Adding and subtracting the min-value gives the same result; it only
   1566       // differs in the overflow flag, which we don't check here.
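              // Worked sketch: the tagged kMinValue is 0x8000000000000000,
              // which is its own two's-complement negation, so adding it and
              // subtracting it agree in all 64 result bits and differ only
              // in the overflow flag.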
   1567       addq(dst, src);
   1568     } else {
   1569       // Subtract by adding the negation.
   1570       LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
   1571       addq(dst, src);
   1572     }
   1573   }
   1574 }
   1575 
   1576 
   1577 void MacroAssembler::SmiSubConstant(Register dst,
   1578                                     Register src,
   1579                                     Smi* constant,
   1580                                     Label* on_not_smi_result,
   1581                                     Label::Distance near_jump) {
   1582   if (constant->value() == 0) {
   1583     if (!dst.is(src)) {
   1584       movq(dst, src);
   1585     }
   1586   } else if (dst.is(src)) {
   1587     ASSERT(!dst.is(kScratchRegister));
   1588     if (constant->value() == Smi::kMinValue) {
   1589       // Subtracting min-value from any non-negative value will overflow.
   1590       // We test the non-negativeness before doing the subtraction.
   1591       testq(src, src);
   1592       j(not_sign, on_not_smi_result, near_jump);
   1593       LoadSmiConstant(kScratchRegister, constant);
   1594       subq(dst, kScratchRegister);
   1595     } else {
   1596       // Subtract by adding the negation.
   1597       LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
   1598       addq(kScratchRegister, dst);
   1599       j(overflow, on_not_smi_result, near_jump);
   1600       movq(dst, kScratchRegister);
   1601     }
   1602   } else {
   1603     if (constant->value() == Smi::kMinValue) {
   1604       // Subtracting min-value from any non-negative value will overflow.
   1605       // We test the non-negativeness before doing the subtraction.
   1606       testq(src, src);
   1607       j(not_sign, on_not_smi_result, near_jump);
   1608       LoadSmiConstant(dst, constant);
   1609       // Adding and subtracting the min-value gives the same result; it only
   1610       // differs in the overflow flag, which we don't check here.
   1611       addq(dst, src);
   1612     } else {
   1613       // Subtract by adding the negation.
   1614       LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
   1615       addq(dst, src);
   1616       j(overflow, on_not_smi_result, near_jump);
   1617     }
   1618   }
   1619 }
   1620 
   1621 
   1622 void MacroAssembler::SmiNeg(Register dst,
   1623                             Register src,
   1624                             Label* on_smi_result,
   1625                             Label::Distance near_jump) {
   1626   if (dst.is(src)) {
   1627     ASSERT(!dst.is(kScratchRegister));
   1628     movq(kScratchRegister, src);
   1629     neg(dst);  // Low 32 bits are retained as zero by negation.
   1630     // Test if result is zero or Smi::kMinValue.
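            // Sketch: for a 64-bit word x, -x == x exactly when x is 0 or
            // 0x8000000000000000 (the tagged Smi::kMinValue), the two inputs
            // whose negation is not a valid smi, so comparing the negated
            // value with the saved original catches both failure cases.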
   1631     cmpq(dst, kScratchRegister);
   1632     j(not_equal, on_smi_result, near_jump);
   1633     movq(src, kScratchRegister);
   1634   } else {
   1635     movq(dst, src);
   1636     neg(dst);
   1637     cmpq(dst, src);
   1638     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
   1639     j(not_equal, on_smi_result, near_jump);
   1640   }
   1641 }
   1642 
   1643 
   1644 void MacroAssembler::SmiAdd(Register dst,
   1645                             Register src1,
   1646                             Register src2,
   1647                             Label* on_not_smi_result,
   1648                             Label::Distance near_jump) {
   1649   ASSERT_NOT_NULL(on_not_smi_result);
   1650   ASSERT(!dst.is(src2));
   1651   if (dst.is(src1)) {
   1652     movq(kScratchRegister, src1);
   1653     addq(kScratchRegister, src2);
   1654     j(overflow, on_not_smi_result, near_jump);
   1655     movq(dst, kScratchRegister);
   1656   } else {
   1657     movq(dst, src1);
   1658     addq(dst, src2);
   1659     j(overflow, on_not_smi_result, near_jump);
   1660   }
   1661 }
   1662 
   1663 
   1664 void MacroAssembler::SmiAdd(Register dst,
   1665                             Register src1,
   1666                             const Operand& src2,
   1667                             Label* on_not_smi_result,
   1668                             Label::Distance near_jump) {
   1669   ASSERT_NOT_NULL(on_not_smi_result);
   1670   if (dst.is(src1)) {
   1671     movq(kScratchRegister, src1);
   1672     addq(kScratchRegister, src2);
   1673     j(overflow, on_not_smi_result, near_jump);
   1674     movq(dst, kScratchRegister);
   1675   } else {
   1676     ASSERT(!src2.AddressUsesRegister(dst));
   1677     movq(dst, src1);
   1678     addq(dst, src2);
   1679     j(overflow, on_not_smi_result, near_jump);
   1680   }
   1681 }
   1682 
   1683 
   1684 void MacroAssembler::SmiAdd(Register dst,
   1685                             Register src1,
   1686                             Register src2) {
   1687   // No overflow checking. Use only when it's known that
   1688   // overflowing is impossible.
   1689   if (!dst.is(src1)) {
   1690     if (emit_debug_code()) {
   1691       movq(kScratchRegister, src1);
   1692       addq(kScratchRegister, src2);
   1693       Check(no_overflow, kSmiAdditionOverflow);
   1694     }
   1695     lea(dst, Operand(src1, src2, times_1, 0));
   1696   } else {
   1697     addq(dst, src2);
   1698     Assert(no_overflow, kSmiAdditionOverflow);
   1699   }
   1700 }
   1701 
   1702 
   1703 void MacroAssembler::SmiSub(Register dst,
   1704                             Register src1,
   1705                             Register src2,
   1706                             Label* on_not_smi_result,
   1707                             Label::Distance near_jump) {
   1708   ASSERT_NOT_NULL(on_not_smi_result);
   1709   ASSERT(!dst.is(src2));
   1710   if (dst.is(src1)) {
   1711     cmpq(dst, src2);
   1712     j(overflow, on_not_smi_result, near_jump);
   1713     subq(dst, src2);
   1714   } else {
   1715     movq(dst, src1);
   1716     subq(dst, src2);
   1717     j(overflow, on_not_smi_result, near_jump);
   1718   }
   1719 }
   1720 
   1721 
   1722 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
   1723   // No overflow checking. Use only when it's known that
   1724   // overflowing is impossible (e.g., subtracting two positive smis).
   1725   ASSERT(!dst.is(src2));
   1726   if (!dst.is(src1)) {
   1727     movq(dst, src1);
   1728   }
   1729   subq(dst, src2);
   1730   Assert(no_overflow, kSmiSubtractionOverflow);
   1731 }
   1732 
   1733 
   1734 void MacroAssembler::SmiSub(Register dst,
   1735                             Register src1,
   1736                             const Operand& src2,
   1737                             Label* on_not_smi_result,
   1738                             Label::Distance near_jump) {
   1739   ASSERT_NOT_NULL(on_not_smi_result);
   1740   if (dst.is(src1)) {
   1741     movq(kScratchRegister, src2);
   1742     cmpq(src1, kScratchRegister);
   1743     j(overflow, on_not_smi_result, near_jump);
   1744     subq(src1, kScratchRegister);
   1745   } else {
   1746     movq(dst, src1);
   1747     subq(dst, src2);
   1748     j(overflow, on_not_smi_result, near_jump);
   1749   }
   1750 }
   1751 
   1752 
   1753 void MacroAssembler::SmiSub(Register dst,
   1754                             Register src1,
   1755                             const Operand& src2) {
   1756   // No overflow checking. Use only when it's known that
   1757   // overflowing is impossible (e.g., subtracting two positive smis).
   1758   if (!dst.is(src1)) {
   1759     movq(dst, src1);
   1760   }
   1761   subq(dst, src2);
   1762   Assert(no_overflow, kSmiSubtractionOverflow);
   1763 }
   1764 
   1765 
   1766 void MacroAssembler::SmiMul(Register dst,
   1767                             Register src1,
   1768                             Register src2,
   1769                             Label* on_not_smi_result,
   1770                             Label::Distance near_jump) {
   1771   ASSERT(!dst.is(src2));
   1772   ASSERT(!dst.is(kScratchRegister));
   1773   ASSERT(!src1.is(kScratchRegister));
   1774   ASSERT(!src2.is(kScratchRegister));
   1775 
   1776   if (dst.is(src1)) {
   1777     Label failure, zero_correct_result;
   1778     movq(kScratchRegister, src1);  // Create backup for later testing.
   1779     SmiToInteger64(dst, src1);
   1780     imul(dst, src2);
   1781     j(overflow, &failure, Label::kNear);
   1782 
   1783     // Check for negative zero result.  If product is zero, and one
   1784     // argument is negative, go to slow case.
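            // Sketch: when the product is zero, the correct JS result is -0.0
            // exactly when one factor is negative. The xor below gives a
            // value whose sign bit is the xor of the operands' sign bits, so
            // "positive" means equal signs and the smi result 0 is correct.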
   1785     Label correct_result;
   1786     testq(dst, dst);
   1787     j(not_zero, &correct_result, Label::kNear);
   1788 
   1789     movq(dst, kScratchRegister);
   1790     xor_(dst, src2);
   1791     // Result was positive zero.
   1792     j(positive, &zero_correct_result, Label::kNear);
   1793 
   1794     bind(&failure);  // Reused failure exit, restores src1.
   1795     movq(src1, kScratchRegister);
   1796     jmp(on_not_smi_result, near_jump);
   1797 
   1798     bind(&zero_correct_result);
   1799     Set(dst, 0);
   1800 
   1801     bind(&correct_result);
   1802   } else {
   1803     SmiToInteger64(dst, src1);
   1804     imul(dst, src2);
   1805     j(overflow, on_not_smi_result, near_jump);
   1806     // Check for negative zero result.  If product is zero, and one
   1807     // argument is negative, go to slow case.
   1808     Label correct_result;
   1809     testq(dst, dst);
   1810     j(not_zero, &correct_result, Label::kNear);
   1811     // One of src1 and src2 is zero; check whether the other is
   1812     // negative.
   1813     movq(kScratchRegister, src1);
   1814     xor_(kScratchRegister, src2);
   1815     j(negative, on_not_smi_result, near_jump);
   1816     bind(&correct_result);
   1817   }
   1818 }
   1819 
   1820 
   1821 void MacroAssembler::SmiDiv(Register dst,
   1822                             Register src1,
   1823                             Register src2,
   1824                             Label* on_not_smi_result,
   1825                             Label::Distance near_jump) {
   1826   ASSERT(!src1.is(kScratchRegister));
   1827   ASSERT(!src2.is(kScratchRegister));
   1828   ASSERT(!dst.is(kScratchRegister));
   1829   ASSERT(!src2.is(rax));
   1830   ASSERT(!src2.is(rdx));
   1831   ASSERT(!src1.is(rdx));
   1832 
   1833   // Check for 0 divisor (result is +/-Infinity).
   1834   testq(src2, src2);
   1835   j(zero, on_not_smi_result, near_jump);
   1836 
   1837   if (src1.is(rax)) {
   1838     movq(kScratchRegister, src1);
   1839   }
   1840   SmiToInteger32(rax, src1);
   1841   // We need to rule out dividing Smi::kMinValue by -1, since that would
   1842   // overflow in idiv and raise an exception.
   1843   // We combine this with the negative zero test (negative zero only happens
   1844   // when dividing zero by a negative number).
   1845 
   1846   // We overshoot a little and go to slow case if we divide min-value
   1847   // by any negative value, not just -1.
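          // Sketch: after SmiToInteger32, eax holds the untagged 32-bit
          // value, and testl(rax, 0x7fffffff) sets the zero flag only when
          // all 31 low bits are clear, i.e. for 0 and for kMinValue
          // (0x80000000) -- exactly the two dividends that need the
          // sign-of-divisor check below.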
   1848   Label safe_div;
   1849   testl(rax, Immediate(0x7fffffff));
   1850   j(not_zero, &safe_div, Label::kNear);
   1851   testq(src2, src2);
   1852   if (src1.is(rax)) {
   1853     j(positive, &safe_div, Label::kNear);
   1854     movq(src1, kScratchRegister);
   1855     jmp(on_not_smi_result, near_jump);
   1856   } else {
   1857     j(negative, on_not_smi_result, near_jump);
   1858   }
   1859   bind(&safe_div);
   1860 
   1861   SmiToInteger32(src2, src2);
   1862   // Sign extend src1 into edx:eax.
   1863   cdq();
   1864   idivl(src2);
   1865   Integer32ToSmi(src2, src2);
   1866   // Check that the remainder is zero.
   1867   testl(rdx, rdx);
   1868   if (src1.is(rax)) {
   1869     Label smi_result;
   1870     j(zero, &smi_result, Label::kNear);
   1871     movq(src1, kScratchRegister);
   1872     jmp(on_not_smi_result, near_jump);
   1873     bind(&smi_result);
   1874   } else {
   1875     j(not_zero, on_not_smi_result, near_jump);
   1876   }
   1877   if (!dst.is(src1) && src1.is(rax)) {
   1878     movq(src1, kScratchRegister);
   1879   }
   1880   Integer32ToSmi(dst, rax);
   1881 }
   1882 
   1883 
   1884 void MacroAssembler::SmiMod(Register dst,
   1885                             Register src1,
   1886                             Register src2,
   1887                             Label* on_not_smi_result,
   1888                             Label::Distance near_jump) {
   1889   ASSERT(!dst.is(kScratchRegister));
   1890   ASSERT(!src1.is(kScratchRegister));
   1891   ASSERT(!src2.is(kScratchRegister));
   1892   ASSERT(!src2.is(rax));
   1893   ASSERT(!src2.is(rdx));
   1894   ASSERT(!src1.is(rdx));
   1895   ASSERT(!src1.is(src2));
   1896 
   1897   testq(src2, src2);
   1898   j(zero, on_not_smi_result, near_jump);
   1899 
   1900   if (src1.is(rax)) {
   1901     movq(kScratchRegister, src1);
   1902   }
   1903   SmiToInteger32(rax, src1);
   1904   SmiToInteger32(src2, src2);
   1905 
   1906   // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
   1907   Label safe_div;
   1908   cmpl(rax, Immediate(Smi::kMinValue));
   1909   j(not_equal, &safe_div, Label::kNear);
   1910   cmpl(src2, Immediate(-1));
   1911   j(not_equal, &safe_div, Label::kNear);
   1912   // Retag inputs and go slow case.
   1913   Integer32ToSmi(src2, src2);
   1914   if (src1.is(rax)) {
   1915     movq(src1, kScratchRegister);
   1916   }
   1917   jmp(on_not_smi_result, near_jump);
   1918   bind(&safe_div);
   1919 
   1920   // Sign extend eax into edx:eax.
   1921   cdq();
   1922   idivl(src2);
   1923   // Restore smi tags on inputs.
   1924   Integer32ToSmi(src2, src2);
   1925   if (src1.is(rax)) {
   1926     movq(src1, kScratchRegister);
   1927   }
   1928   // Check for a negative zero result.  If the result is zero, and the
   1929   // dividend is negative, go slow to return a floating point negative zero.
   1930   Label smi_result;
   1931   testl(rdx, rdx);
   1932   j(not_zero, &smi_result, Label::kNear);
   1933   testq(src1, src1);
   1934   j(negative, on_not_smi_result, near_jump);
   1935   bind(&smi_result);
   1936   Integer32ToSmi(dst, rdx);
   1937 }
   1938 
   1939 
   1940 void MacroAssembler::SmiNot(Register dst, Register src) {
   1941   ASSERT(!dst.is(kScratchRegister));
   1942   ASSERT(!src.is(kScratchRegister));
   1943   // Set tag and padding bits before negating, so that they are zero afterwards.
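          // Sketch: movl zero-extends, so kScratchRegister becomes
          // 0x00000000FFFFFFFF. A smi has its low 32 bits clear, so the xor
          // (or the carry-free lea addition) sets exactly those bits, and
          // not_() then complements the payload while returning the low 32
          // bits to zero, leaving a correctly tagged ~value.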
   1944   movl(kScratchRegister, Immediate(~0));
   1945   if (dst.is(src)) {
   1946     xor_(dst, kScratchRegister);
   1947   } else {
   1948     lea(dst, Operand(src, kScratchRegister, times_1, 0));
   1949   }
   1950   not_(dst);
   1951 }
   1952 
   1953 
   1954 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
   1955   ASSERT(!dst.is(src2));
   1956   if (!dst.is(src1)) {
   1957     movq(dst, src1);
   1958   }
   1959   and_(dst, src2);
   1960 }
   1961 
   1962 
   1963 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   1964   if (constant->value() == 0) {
   1965     Set(dst, 0);
   1966   } else if (dst.is(src)) {
   1967     ASSERT(!dst.is(kScratchRegister));
   1968     Register constant_reg = GetSmiConstant(constant);
   1969     and_(dst, constant_reg);
   1970   } else {
   1971     LoadSmiConstant(dst, constant);
   1972     and_(dst, src);
   1973   }
   1974 }
   1975 
   1976 
   1977 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   1978   if (!dst.is(src1)) {
   1979     ASSERT(!src1.is(src2));
   1980     movq(dst, src1);
   1981   }
   1982   or_(dst, src2);
   1983 }
   1984 
   1985 
   1986 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   1987   if (dst.is(src)) {
   1988     ASSERT(!dst.is(kScratchRegister));
   1989     Register constant_reg = GetSmiConstant(constant);
   1990     or_(dst, constant_reg);
   1991   } else {
   1992     LoadSmiConstant(dst, constant);
   1993     or_(dst, src);
   1994   }
   1995 }
   1996 
   1997 
   1998 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   1999   if (!dst.is(src1)) {
   2000     ASSERT(!src1.is(src2));
   2001     movq(dst, src1);
   2002   }
   2003   xor_(dst, src2);
   2004 }
   2005 
   2006 
   2007 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   2008   if (dst.is(src)) {
   2009     ASSERT(!dst.is(kScratchRegister));
   2010     Register constant_reg = GetSmiConstant(constant);
   2011     xor_(dst, constant_reg);
   2012   } else {
   2013     LoadSmiConstant(dst, constant);
   2014     xor_(dst, src);
   2015   }
   2016 }
   2017 
   2018 
   2019 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
   2020                                                      Register src,
   2021                                                      int shift_value) {
   2022   ASSERT(is_uint5(shift_value));
   2023   if (shift_value > 0) {
   2024     if (dst.is(src)) {
   2025       sar(dst, Immediate(shift_value + kSmiShift));
   2026       shl(dst, Immediate(kSmiShift));
   2027     } else {
   2028       UNIMPLEMENTED();  // Not used.
   2029     }
   2030   }
   2031 }
   2032 
   2033 
   2034 void MacroAssembler::SmiShiftLeftConstant(Register dst,
   2035                                           Register src,
   2036                                           int shift_value) {
   2037   if (!dst.is(src)) {
   2038     movq(dst, src);
   2039   }
   2040   if (shift_value > 0) {
   2041     shl(dst, Immediate(shift_value));
   2042   }
   2043 }
   2044 
   2045 
   2046 void MacroAssembler::SmiShiftLogicalRightConstant(
   2047     Register dst, Register src, int shift_value,
   2048     Label* on_not_smi_result, Label::Distance near_jump) {
   2049   // Logical right shift interprets its result as an *unsigned* number.
   2050   if (dst.is(src)) {
   2051     UNIMPLEMENTED();  // Not used.
   2052   } else {
   2053     movq(dst, src);
   2054     if (shift_value == 0) {
   2055       testq(dst, dst);
   2056       j(negative, on_not_smi_result, near_jump);
   2057     }
   2058     shr(dst, Immediate(shift_value + kSmiShift));
   2059     shl(dst, Immediate(kSmiShift));
   2060   }
   2061 }
   2062 
   2063 
   2064 void MacroAssembler::SmiShiftLeft(Register dst,
   2065                                   Register src1,
   2066                                   Register src2) {
   2067   ASSERT(!dst.is(rcx));
   2068   // Untag shift amount.
   2069   if (!dst.is(src1)) {
   2070     movq(dst, src1);
   2071   }
   2072   SmiToInteger32(rcx, src2);
   2073   // Shift amount is specified by the lower 5 bits, not six as in raw shl.
   2074   and_(rcx, Immediate(0x1f));
   2075   shl_cl(dst);
   2076 }
   2077 
   2078 
   2079 void MacroAssembler::SmiShiftLogicalRight(Register dst,
   2080                                           Register src1,
   2081                                           Register src2,
   2082                                           Label* on_not_smi_result,
   2083                                           Label::Distance near_jump) {
   2084   ASSERT(!dst.is(kScratchRegister));
   2085   ASSERT(!src1.is(kScratchRegister));
   2086   ASSERT(!src2.is(kScratchRegister));
   2087   ASSERT(!dst.is(rcx));
   2088   // dst and src1 can be the same, because the one case that bails out
   2089   // is a shift by 0, which leaves dst, and therefore src1, unchanged.
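          // Sketch of the shift below: or-ing the untagged shift amount with
          // kSmiShift (32) makes shr_cl shift by 32 + (amount & 0x1f), which
          // untags and shifts in a single instruction; the shl by kSmiShift
          // afterwards retags the result.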
   2090   if (src1.is(rcx) || src2.is(rcx)) {
   2091     movq(kScratchRegister, rcx);
   2092   }
   2093   if (!dst.is(src1)) {
   2094     movq(dst, src1);
   2095   }
   2096   SmiToInteger32(rcx, src2);
   2097   orl(rcx, Immediate(kSmiShift));
   2098   shr_cl(dst);  // Shift is (rcx & 0x1f) + 32.
   2099   shl(dst, Immediate(kSmiShift));
   2100   testq(dst, dst);
   2101   if (src1.is(rcx) || src2.is(rcx)) {
   2102     Label positive_result;
   2103     j(positive, &positive_result, Label::kNear);
   2104     if (src1.is(rcx)) {
   2105       movq(src1, kScratchRegister);
   2106     } else {
   2107       movq(src2, kScratchRegister);
   2108     }
   2109     jmp(on_not_smi_result, near_jump);
   2110     bind(&positive_result);
   2111   } else {
   2112     // src2 was zero and src1 negative.
   2113     j(negative, on_not_smi_result, near_jump);
   2114   }
   2115 }
   2116 
   2117 
   2118 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
   2119                                              Register src1,
   2120                                              Register src2) {
   2121   ASSERT(!dst.is(kScratchRegister));
   2122   ASSERT(!src1.is(kScratchRegister));
   2123   ASSERT(!src2.is(kScratchRegister));
   2124   ASSERT(!dst.is(rcx));
   2125   if (src1.is(rcx)) {
   2126     movq(kScratchRegister, src1);
   2127   } else if (src2.is(rcx)) {
   2128     movq(kScratchRegister, src2);
   2129   }
   2130   if (!dst.is(src1)) {
   2131     movq(dst, src1);
   2132   }
   2133   SmiToInteger32(rcx, src2);
   2134   orl(rcx, Immediate(kSmiShift));
   2135   sar_cl(dst);  // Shift by 32 + (original rcx & 0x1f).
   2136   shl(dst, Immediate(kSmiShift));
   2137   if (src1.is(rcx)) {
   2138     movq(src1, kScratchRegister);
   2139   } else if (src2.is(rcx)) {
   2140     movq(src2, kScratchRegister);
   2141   }
   2142 }
   2143 
   2144 
   2145 void MacroAssembler::SelectNonSmi(Register dst,
   2146                                   Register src1,
   2147                                   Register src2,
   2148                                   Label* on_not_smis,
   2149                                   Label::Distance near_jump) {
   2150   ASSERT(!dst.is(kScratchRegister));
   2151   ASSERT(!src1.is(kScratchRegister));
   2152   ASSERT(!src2.is(kScratchRegister));
   2153   ASSERT(!dst.is(src1));
   2154   ASSERT(!dst.is(src2));
   2155   // The operands must not both be smis.
   2156 #ifdef DEBUG
   2157   if (allow_stub_calls()) {  // Check contains a stub call.
   2158     Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
   2159     Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
   2160   }
   2161 #endif
   2162   STATIC_ASSERT(kSmiTag == 0);
   2163   ASSERT_EQ(0, Smi::FromInt(0));
   2164   movl(kScratchRegister, Immediate(kSmiTagMask));
   2165   and_(kScratchRegister, src1);
   2166   testl(kScratchRegister, src2);
   2167   // If non-zero, then both operands are heap objects (neither is a smi).
   2168   j(not_zero, on_not_smis, near_jump);
   2169 
   2170   // Exactly one operand is a smi.
   2171   ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
   2172   // kScratchRegister still holds src1 & kSmiTagMask, which is zero or one.
   2173   subq(kScratchRegister, Immediate(1));
   2174   // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
   2175   movq(dst, src1);
   2176   xor_(dst, src2);
   2177   and_(dst, kScratchRegister);
   2178   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   2179   xor_(dst, src1);
   2180   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
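          // Worked example: if src1 is the smi, kScratchRegister is
          // (src1 & 1) - 1 == -1 (all ones), so
          //   dst = ((src1 ^ src2) & ~0) ^ src1 == src2;
          // if src2 is the smi, kScratchRegister is 0 and dst collapses to
          // (0 ^ src1) == src1, the non-smi in either case.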
   2181 }
   2182 
   2183 
   2184 SmiIndex MacroAssembler::SmiToIndex(Register dst,
   2185                                     Register src,
   2186                                     int shift) {
   2187   ASSERT(is_uint6(shift));
   2188   // There is a possible optimization if shift is in the range 60-63, but that
   2189   // will (and must) never happen.
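          // Sketch: a tagged smi is value << kSmiShift (32) and the desired
          // index is value << shift, so an arithmetic right shift by
          // (kSmiShift - shift), or a left shift by (shift - kSmiShift),
          // turns the tagged word directly into the scaled index.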
   2190   if (!dst.is(src)) {
   2191     movq(dst, src);
   2192   }
   2193   if (shift < kSmiShift) {
   2194     sar(dst, Immediate(kSmiShift - shift));
   2195   } else {
   2196     shl(dst, Immediate(shift - kSmiShift));
   2197   }
   2198   return SmiIndex(dst, times_1);
   2199 }
   2200 
   2201 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   2202                                             Register src,
   2203                                             int shift) {
   2204   // Register src holds a positive smi.
   2205   ASSERT(is_uint6(shift));
   2206   if (!dst.is(src)) {
   2207     movq(dst, src);
   2208   }
   2209   neg(dst);
   2210   if (shift < kSmiShift) {
   2211     sar(dst, Immediate(kSmiShift - shift));
   2212   } else {
   2213     shl(dst, Immediate(shift - kSmiShift));
   2214   }
   2215   return SmiIndex(dst, times_1);
   2216 }
   2217 
   2218 
   2219 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
   2220   ASSERT_EQ(0, kSmiShift % kBitsPerByte);
   2221   addl(dst, Operand(src, kSmiShift / kBitsPerByte));
   2222 }
   2223 
   2224 
   2225 void MacroAssembler::JumpIfNotString(Register object,
   2226                                      Register object_map,
   2227                                      Label* not_string,
   2228                                      Label::Distance near_jump) {
   2229   Condition is_smi = CheckSmi(object);
   2230   j(is_smi, not_string, near_jump);
   2231   CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
   2232   j(above_equal, not_string, near_jump);
   2233 }
   2234 
   2235 
   2236 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
   2237     Register first_object,
   2238     Register second_object,
   2239     Register scratch1,
   2240     Register scratch2,
   2241     Label* on_fail,
   2242     Label::Distance near_jump) {
   2243   // Check that both objects are not smis.
   2244   Condition either_smi = CheckEitherSmi(first_object, second_object);
   2245   j(either_smi, on_fail, near_jump);
   2246 
   2247   // Load instance type for both strings.
   2248   movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
   2249   movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
   2250   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   2251   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
   2252 
   2253   // Check that both are flat ASCII strings.
   2254   ASSERT(kNotStringTag != 0);
   2255   const int kFlatAsciiStringMask =
   2256       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2257   const int kFlatAsciiStringTag =
   2258       kStringTag | kOneByteStringTag | kSeqStringTag;
   2259 
   2260   andl(scratch1, Immediate(kFlatAsciiStringMask));
   2261   andl(scratch2, Immediate(kFlatAsciiStringMask));
   2262   // Interleave the bits to check both scratch1 and scratch2 in one test.
   2263   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
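          // Sketch: the lea below computes scratch1 + (scratch2 << 3). The
          // assert above guarantees the mask and the mask shifted left by 3
          // share no bits, so the two masked type words occupy disjoint bit
          // positions and a single cmpl against
          // kFlatAsciiStringTag + (kFlatAsciiStringTag << 3) checks both.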
   2264   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
   2265   cmpl(scratch1,
   2266        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   2267   j(not_equal, on_fail, near_jump);
   2268 }
   2269 
   2270 
   2271 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
   2272     Register instance_type,
   2273     Register scratch,
   2274     Label* failure,
   2275     Label::Distance near_jump) {
   2276   if (!scratch.is(instance_type)) {
   2277     movl(scratch, instance_type);
   2278   }
   2279 
   2280   const int kFlatAsciiStringMask =
   2281       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2282 
   2283   andl(scratch, Immediate(kFlatAsciiStringMask));
   2284   cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
   2285   j(not_equal, failure, near_jump);
   2286 }
   2287 
   2288 
   2289 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
   2290     Register first_object_instance_type,
   2291     Register second_object_instance_type,
   2292     Register scratch1,
   2293     Register scratch2,
   2294     Label* on_fail,
   2295     Label::Distance near_jump) {
   2296   // Load instance type for both strings.
   2297   movq(scratch1, first_object_instance_type);
   2298   movq(scratch2, second_object_instance_type);
   2299 
   2300   // Check that both are flat ASCII strings.
   2301   ASSERT(kNotStringTag != 0);
   2302   const int kFlatAsciiStringMask =
   2303       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2304   const int kFlatAsciiStringTag =
   2305       kStringTag | kOneByteStringTag | kSeqStringTag;
   2306 
   2307   andl(scratch1, Immediate(kFlatAsciiStringMask));
   2308   andl(scratch2, Immediate(kFlatAsciiStringMask));
   2309   // Interleave the bits to check both scratch1 and scratch2 in one test.
   2310   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
   2311   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
   2312   cmpl(scratch1,
   2313        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   2314   j(not_equal, on_fail, near_jump);
   2315 }
   2316 
   2317 
   2318 template<class T>
   2319 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
   2320                                       T operand_or_register,
   2321                                       Label* not_unique_name,
   2322                                       Label::Distance distance) {
   2323   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   2324   Label succeed;
   2325   masm->testb(operand_or_register,
   2326               Immediate(kIsNotStringMask | kIsNotInternalizedMask));
   2327   masm->j(zero, &succeed, Label::kNear);
   2328   masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
   2329   masm->j(not_equal, not_unique_name, distance);
   2330 
   2331   masm->bind(&succeed);
   2332 }
   2333 
   2334 
   2335 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
   2336                                          Label* not_unique_name,
   2337                                          Label::Distance distance) {
   2338   JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
   2339 }
   2340 
   2341 
   2342 void MacroAssembler::JumpIfNotUniqueName(Register reg,
   2343                                          Label* not_unique_name,
   2344                                          Label::Distance distance) {
   2345   JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
   2346 }
   2347 
   2348 
   2349 void MacroAssembler::Move(Register dst, Register src) {
   2350   if (!dst.is(src)) {
   2351     movq(dst, src);
   2352   }
   2353 }
   2354 
   2355 
   2356 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   2357   AllowDeferredHandleDereference smi_check;
   2358   if (source->IsSmi()) {
   2359     Move(dst, Smi::cast(*source));
   2360   } else {
   2361     ASSERT(source->IsHeapObject());
   2362     movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
   2363   }
   2364 }
   2365 
   2366 
   2367 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
   2368   AllowDeferredHandleDereference smi_check;
   2369   if (source->IsSmi()) {
   2370     Move(dst, Smi::cast(*source));
   2371   } else {
   2372     ASSERT(source->IsHeapObject());
   2373     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2374     movq(dst, kScratchRegister);
   2375   }
   2376 }
   2377 
   2378 
   2379 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
   2380   AllowDeferredHandleDereference smi_check;
   2381   if (source->IsSmi()) {
   2382     Cmp(dst, Smi::cast(*source));
   2383   } else {
   2384     ASSERT(source->IsHeapObject());
   2385     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2386     cmpq(dst, kScratchRegister);
   2387   }
   2388 }
   2389 
   2390 
   2391 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   2392   AllowDeferredHandleDereference smi_check;
   2393   if (source->IsSmi()) {
   2394     Cmp(dst, Smi::cast(*source));
   2395   } else {
   2396     ASSERT(source->IsHeapObject());
   2397     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2398     cmpq(dst, kScratchRegister);
   2399   }
   2400 }
   2401 
   2402 
   2403 void MacroAssembler::Push(Handle<Object> source) {
   2404   AllowDeferredHandleDereference smi_check;
   2405   if (source->IsSmi()) {
   2406     Push(Smi::cast(*source));
   2407   } else {
   2408     ASSERT(source->IsHeapObject());
   2409     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2410     push(kScratchRegister);
   2411   }
   2412 }
   2413 
   2414 
   2415 void MacroAssembler::LoadHeapObject(Register result,
   2416                                     Handle<HeapObject> object) {
   2417   AllowDeferredHandleDereference using_raw_address;
   2418   if (isolate()->heap()->InNewSpace(*object)) {
   2419     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   2420     movq(result, cell, RelocInfo::CELL);
   2421     movq(result, Operand(result, 0));
   2422   } else {
   2423     Move(result, object);
   2424   }
   2425 }
   2426 
   2427 
   2428 void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
   2429   AllowDeferredHandleDereference using_raw_address;
   2430   if (isolate()->heap()->InNewSpace(*object)) {
   2431     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   2432     movq(kScratchRegister, cell, RelocInfo::CELL);
   2433     cmpq(reg, Operand(kScratchRegister, 0));
   2434   } else {
   2435     Cmp(reg, object);
   2436   }
   2437 }
   2438 
   2439 
   2440 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
   2441   AllowDeferredHandleDereference using_raw_address;
   2442   if (isolate()->heap()->InNewSpace(*object)) {
   2443     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   2444     movq(kScratchRegister, cell, RelocInfo::CELL);
   2445     movq(kScratchRegister, Operand(kScratchRegister, 0));
   2446     push(kScratchRegister);
   2447   } else {
   2448     Push(object);
   2449   }
   2450 }
   2451 
   2452 
   2453 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
   2454   if (dst.is(rax)) {
   2455     AllowDeferredHandleDereference embedding_raw_address;
   2456     load_rax(cell.location(), RelocInfo::CELL);
   2457   } else {
   2458     movq(dst, cell, RelocInfo::CELL);
   2459     movq(dst, Operand(dst, 0));
   2460   }
   2461 }
   2462 
   2463 
   2464 void MacroAssembler::Push(Smi* source) {
   2465   intptr_t smi = reinterpret_cast<intptr_t>(source);
   2466   if (is_int32(smi)) {
   2467     push(Immediate(static_cast<int32_t>(smi)));
   2468   } else {
   2469     Register constant = GetSmiConstant(source);
   2470     push(constant);
   2471   }
   2472 }
   2473 
   2474 
   2475 void MacroAssembler::Drop(int stack_elements) {
   2476   if (stack_elements > 0) {
   2477     addq(rsp, Immediate(stack_elements * kPointerSize));
   2478   }
   2479 }
   2480 
   2481 
   2482 void MacroAssembler::Test(const Operand& src, Smi* source) {
   2483   testl(Operand(src, kIntSize), Immediate(source->value()));
   2484 }
   2485 
   2486 
   2487 void MacroAssembler::TestBit(const Operand& src, int bits) {
   2488   int byte_offset = bits / kBitsPerByte;
   2489   int bit_in_byte = bits & (kBitsPerByte - 1);
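          // Worked example: bits == 13 gives byte_offset 1 and bit_in_byte 5,
          // so the testb below probes bit 5 (mask 0x20) of the second byte.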
   2490   testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
   2491 }
   2492 
   2493 
   2494 void MacroAssembler::Jump(ExternalReference ext) {
   2495   LoadAddress(kScratchRegister, ext);
   2496   jmp(kScratchRegister);
   2497 }
   2498 
   2499 
   2500 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
   2501   movq(kScratchRegister, destination, rmode);
   2502   jmp(kScratchRegister);
   2503 }
   2504 
   2505 
   2506 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   2507   // TODO(X64): Inline this
   2508   jmp(code_object, rmode);
   2509 }
   2510 
   2511 
   2512 int MacroAssembler::CallSize(ExternalReference ext) {
   2513   // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
   2514   return LoadAddressSize(ext) +
   2515          Assembler::kCallScratchRegisterInstructionLength;
   2516 }
   2517 
   2518 
   2519 void MacroAssembler::Call(ExternalReference ext) {
   2520 #ifdef DEBUG
   2521   int end_position = pc_offset() + CallSize(ext);
   2522 #endif
   2523   LoadAddress(kScratchRegister, ext);
   2524   call(kScratchRegister);
   2525 #ifdef DEBUG
   2526   CHECK_EQ(end_position, pc_offset());
   2527 #endif
   2528 }
   2529 
   2530 
   2531 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
   2532 #ifdef DEBUG
   2533   int end_position = pc_offset() + CallSize(destination, rmode);
   2534 #endif
   2535   movq(kScratchRegister, destination, rmode);
   2536   call(kScratchRegister);
   2537 #ifdef DEBUG
   2538   CHECK_EQ(pc_offset(), end_position);
   2539 #endif
   2540 }
   2541 
   2542 
   2543 void MacroAssembler::Call(Handle<Code> code_object,
   2544                           RelocInfo::Mode rmode,
   2545                           TypeFeedbackId ast_id) {
   2546 #ifdef DEBUG
   2547   int end_position = pc_offset() + CallSize(code_object);
   2548 #endif
   2549   ASSERT(RelocInfo::IsCodeTarget(rmode));
   2550   call(code_object, rmode, ast_id);
   2551 #ifdef DEBUG
   2552   CHECK_EQ(end_position, pc_offset());
   2553 #endif
   2554 }
   2555 
   2556 
   2557 void MacroAssembler::Pushad() {
   2558   push(rax);
   2559   push(rcx);
   2560   push(rdx);
   2561   push(rbx);
   2562   // Not pushing rsp or rbp.
   2563   push(rsi);
   2564   push(rdi);
   2565   push(r8);
   2566   push(r9);
   2567   // r10 is kScratchRegister.
   2568   push(r11);
   2569   // r12 is kSmiConstantRegister.
   2570   // r13 is kRootRegister.
   2571   push(r14);
   2572   push(r15);
   2573   STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
   2574   // Use lea for symmetry with Popad.
   2575   int sp_delta =
   2576       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
   2577   lea(rsp, Operand(rsp, -sp_delta));
   2578 }
   2579 
   2580 
   2581 void MacroAssembler::Popad() {
   2582   // Popad must not change the flags, so use lea instead of addq.
   2583   int sp_delta =
   2584       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
   2585   lea(rsp, Operand(rsp, sp_delta));
   2586   pop(r15);
   2587   pop(r14);
   2588   pop(r11);
   2589   pop(r9);
   2590   pop(r8);
   2591   pop(rdi);
   2592   pop(rsi);
   2593   pop(rbx);
   2594   pop(rdx);
   2595   pop(rcx);
   2596   pop(rax);
   2597 }
   2598 
   2599 
   2600 void MacroAssembler::Dropad() {
   2601   addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
   2602 }
   2603 
   2604 
   2605 // Order in which general registers are pushed by Pushad:
   2606 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
   2607 const int
   2608 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
   2609     0,
   2610     1,
   2611     2,
   2612     3,
   2613     -1,
   2614     -1,
   2615     4,
   2616     5,
   2617     6,
   2618     7,
   2619     -1,
   2620     8,
   2621     -1,
   2622     -1,
   2623     9,
   2624     10
   2625 };
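        // Illustrative reading of the table: rsi has register code 6 and
        // kSafepointPushRegisterIndices[6] == 4, meaning rsi was the fifth
        // register pushed by Pushad above; -1 marks the registers Pushad
        // does not save (rsp, rbp, r10, r12 and r13).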
   2626 
   2627 
   2628 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
   2629                                                   const Immediate& imm) {
   2630   movq(SafepointRegisterSlot(dst), imm);
   2631 }
   2632 
   2633 
   2634 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
   2635   movq(SafepointRegisterSlot(dst), src);
   2636 }
   2637 
   2638 
   2639 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
   2640   movq(dst, SafepointRegisterSlot(src));
   2641 }
   2642 
   2643 
   2644 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
   2645   return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
   2646 }
   2647 
   2648 
   2649 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
   2650                                     int handler_index) {
   2651   // Adjust this code if not the case.
   2652   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   2653   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2654   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   2655   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   2656   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   2657   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
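          // Sketch of the handler frame the pushes below build (rsp ends up
          // pointing at the next-handler link):
          //   rsp + 0 * kPointerSize : next handler
          //   rsp + 1 * kPointerSize : code object
          //   rsp + 2 * kPointerSize : state (index and kind)
          //   rsp + 3 * kPointerSize : context (rsi, or Smi 0 for JS_ENTRY)
          //   rsp + 4 * kPointerSize : frame pointer (rbp, or NULL)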
   2658 
   2659   // We will build up the handler from the bottom by pushing on the stack.
   2660   // First push the frame pointer and context.
   2661   if (kind == StackHandler::JS_ENTRY) {
   2662     // The frame pointer does not point to a JS frame so we save NULL for
   2663     // rbp. We expect the code throwing an exception to check rbp before
   2664     // dereferencing it to restore the context.
   2665     push(Immediate(0));  // NULL frame pointer.
   2666     Push(Smi::FromInt(0));  // No context.
   2667   } else {
   2668     push(rbp);
   2669     push(rsi);
   2670   }
   2671 
   2672   // Push the state and the code object.
   2673   unsigned state =
   2674       StackHandler::IndexField::encode(handler_index) |
   2675       StackHandler::KindField::encode(kind);
   2676   push(Immediate(state));
   2677   Push(CodeObject());
   2678 
   2679   // Link the current handler as the next handler.
   2680   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2681   push(ExternalOperand(handler_address));
   2682   // Set this new handler as the current one.
   2683   movq(ExternalOperand(handler_address), rsp);
   2684 }
   2685 
   2686 
   2687 void MacroAssembler::PopTryHandler() {
   2688   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2689   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2690   pop(ExternalOperand(handler_address));
   2691   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
   2692 }
   2693 
   2694 
   2695 void MacroAssembler::JumpToHandlerEntry() {
   2696   // Compute the handler entry address and jump to it.  The handler table is
   2697   // a fixed array of (smi-tagged) code offsets.
   2698   // rax = exception, rdi = code object, rdx = state.
   2699   movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
   2700   shr(rdx, Immediate(StackHandler::kKindWidth));
   2701   movq(rdx,
   2702        FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
   2703   SmiToInteger64(rdx, rdx);
   2704   lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
   2705   jmp(rdi);
   2706 }
   2707 
   2708 
   2709 void MacroAssembler::Throw(Register value) {
   2710   // Adjust this code if not the case.
   2711   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   2712   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2713   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   2714   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   2715   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   2716   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   2717 
   2718   // The exception is expected in rax.
   2719   if (!value.is(rax)) {
   2720     movq(rax, value);
   2721   }
   2722   // Drop the stack pointer to the top of the top handler.
   2723   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2724   movq(rsp, ExternalOperand(handler_address));
   2725   // Restore the next handler.
   2726   pop(ExternalOperand(handler_address));
   2727 
   2728   // Remove the code object and state, compute the handler address in rdi.
   2729   pop(rdi);  // Code object.
   2730   pop(rdx);  // Offset and state.
   2731 
   2732   // Restore the context and frame pointer.
   2733   pop(rsi);  // Context.
   2734   pop(rbp);  // Frame pointer.
   2735 
   2736   // If the handler is a JS frame, restore the context to the frame.
   2737   // (kind == JS_ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
   2738   // rbp or rsi.
   2739   Label skip;
   2740   testq(rsi, rsi);
   2741   j(zero, &skip, Label::kNear);
   2742   movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
   2743   bind(&skip);
   2744 
   2745   JumpToHandlerEntry();
   2746 }
   2747 
   2748 
   2749 void MacroAssembler::ThrowUncatchable(Register value) {
   2750   // Adjust this code if not the case.
   2751   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   2752   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2753   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   2754   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   2755   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   2756   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   2757 
   2758   // The exception is expected in rax.
   2759   if (!value.is(rax)) {
   2760     movq(rax, value);
   2761   }
   2762   // Drop the stack pointer to the top of the top stack handler.
   2763   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2764   Load(rsp, handler_address);
   2765 
   2766   // Unwind the handlers until the top ENTRY handler is found.
   2767   Label fetch_next, check_kind;
   2768   jmp(&check_kind, Label::kNear);
   2769   bind(&fetch_next);
   2770   movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
   2771 
   2772   bind(&check_kind);
   2773   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
   2774   testl(Operand(rsp, StackHandlerConstants::kStateOffset),
   2775         Immediate(StackHandler::KindField::kMask));
   2776   j(not_zero, &fetch_next);
   2777 
   2778   // Set the top handler address to next handler past the top ENTRY handler.
   2779   pop(ExternalOperand(handler_address));
   2780 
   2781   // Remove the code object and state, compute the handler address in rdi.
   2782   pop(rdi);  // Code object.
   2783   pop(rdx);  // Offset and state.
   2784 
   2785   // Clear the context pointer and frame pointer (0 was saved in the handler).
   2786   pop(rsi);
   2787   pop(rbp);
   2788 
   2789   JumpToHandlerEntry();
   2790 }
   2791 
   2792 
   2793 void MacroAssembler::Ret() {
   2794   ret(0);
   2795 }
   2796 
   2797 
   2798 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
   2799   if (is_uint16(bytes_dropped)) {
   2800     ret(bytes_dropped);
   2801   } else {
   2802     PopReturnAddressTo(scratch);
   2803     addq(rsp, Immediate(bytes_dropped));
   2804     PushReturnAddressFrom(scratch);
   2805     ret(0);
   2806   }
   2807 }
   2808 
   2809 
   2810 void MacroAssembler::FCmp() {
   2811   fucomip();
   2812   fstp(0);
   2813 }
   2814 
   2815 
   2816 void MacroAssembler::CmpObjectType(Register heap_object,
   2817                                    InstanceType type,
   2818                                    Register map) {
   2819   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   2820   CmpInstanceType(map, type);
   2821 }
   2822 
   2823 
   2824 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
   2825   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
   2826        Immediate(static_cast<int8_t>(type)));
   2827 }
   2828 
   2829 
   2830 void MacroAssembler::CheckFastElements(Register map,
   2831                                        Label* fail,
   2832                                        Label::Distance distance) {
   2833   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2834   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2835   STATIC_ASSERT(FAST_ELEMENTS == 2);
   2836   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2837   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2838        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   2839   j(above, fail, distance);
   2840 }
   2841 
   2842 
   2843 void MacroAssembler::CheckFastObjectElements(Register map,
   2844                                              Label* fail,
   2845                                              Label::Distance distance) {
   2846   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2847   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2848   STATIC_ASSERT(FAST_ELEMENTS == 2);
   2849   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2850   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2851        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   2852   j(below_equal, fail, distance);
   2853   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2854        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   2855   j(above, fail, distance);
   2856 }
   2857 
   2858 
   2859 void MacroAssembler::CheckFastSmiElements(Register map,
   2860                                           Label* fail,
   2861                                           Label::Distance distance) {
   2862   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2863   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2864   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2865        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   2866   j(above, fail, distance);
   2867 }
   2868 
   2869 
   2870 void MacroAssembler::StoreNumberToDoubleElements(
   2871     Register maybe_number,
   2872     Register elements,
   2873     Register index,
   2874     XMMRegister xmm_scratch,
   2875     Label* fail,
   2876     int elements_offset) {
   2877   Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
   2878 
   2879   JumpIfSmi(maybe_number, &smi_value, Label::kNear);
   2880 
   2881   CheckMap(maybe_number,
   2882            isolate()->factory()->heap_number_map(),
   2883            fail,
   2884            DONT_DO_SMI_CHECK);
   2885 
   2886   // Double value, canonicalize NaN.
   2887   uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
   2888   cmpl(FieldOperand(maybe_number, offset),
   2889        Immediate(kNaNOrInfinityLowerBoundUpper32));
   2890   j(greater_equal, &maybe_nan, Label::kNear);
   2891 
   2892   bind(&not_nan);
   2893   movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
   2894   bind(&have_double_value);
   2895   movsd(FieldOperand(elements, index, times_8,
   2896                      FixedDoubleArray::kHeaderSize - elements_offset),
   2897         xmm_scratch);
   2898   jmp(&done);
   2899 
   2900   bind(&maybe_nan);
   2901   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
   2902   // it's an Infinity, and the non-NaN code path applies.
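          // Sketch, assuming kNaNOrInfinityLowerBoundUpper32 is 0x7FF00000
          // (the upper word of +Infinity): a strictly greater upper word
          // already implies non-zero fraction bits, hence NaN; on equality
          // the remaining fraction bits sit in the lower word, which the
          // cmpl below tests to tell Infinity (zero) from NaN (non-zero).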
   2903   j(greater, &is_nan, Label::kNear);
   2904   cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
   2905   j(zero, &not_nan);
   2906   bind(&is_nan);
   2907   // Convert all NaNs to the same canonical NaN value when they are stored in
   2908   // the double array.
   2909   Set(kScratchRegister, BitCast<uint64_t>(
   2910       FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
   2911   movq(xmm_scratch, kScratchRegister);
   2912   jmp(&have_double_value, Label::kNear);
   2913 
   2914   bind(&smi_value);
   2915   // Value is a smi. Convert to a double and store.
   2916   // Preserve original value.
   2917   SmiToInteger32(kScratchRegister, maybe_number);
   2918   cvtlsi2sd(xmm_scratch, kScratchRegister);
   2919   movsd(FieldOperand(elements, index, times_8,
   2920                      FixedDoubleArray::kHeaderSize - elements_offset),
   2921         xmm_scratch);
   2922   bind(&done);
   2923 }
   2924 
   2925 
   2926 void MacroAssembler::CompareMap(Register obj,
   2927                                 Handle<Map> map,
   2928                                 Label* early_success) {
   2929   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   2930 }
   2931 
   2932 
   2933 void MacroAssembler::CheckMap(Register obj,
   2934                               Handle<Map> map,
   2935                               Label* fail,
   2936                               SmiCheckType smi_check_type) {
   2937   if (smi_check_type == DO_SMI_CHECK) {
   2938     JumpIfSmi(obj, fail);
   2939   }
   2940 
   2941   Label success;
   2942   CompareMap(obj, map, &success);
   2943   j(not_equal, fail);
   2944   bind(&success);
   2945 }
   2946 
   2947 
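         // Clamps the signed 32-bit value in 'reg' to the byte range [0, 255]
         // in place.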
   2948 void MacroAssembler::ClampUint8(Register reg) {
   2949   Label done;
   2950   testl(reg, Immediate(0xFFFFFF00));
   2951   j(zero, &done, Label::kNear);
   2952   setcc(negative, reg);  // 1 if negative, 0 if positive.
   2953   decb(reg);  // 0 if negative, 255 if positive.
   2954   bind(&done);
   2955 }
   2956 
   2957 
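         // Converts the double in 'input_reg' to an integer clamped to
         // [0, 255] in 'result_reg'. NaN maps to 0; values outside the int32
         // range saturate to 0 or 255 depending on sign.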
   2958 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
   2959                                         XMMRegister temp_xmm_reg,
   2960                                         Register result_reg) {
   2961   Label done;
   2962   Label conv_failure;
   2963   xorps(temp_xmm_reg, temp_xmm_reg);
   2964   cvtsd2si(result_reg, input_reg);
   2965   testl(result_reg, Immediate(0xFFFFFF00));
   2966   j(zero, &done, Label::kNear);
   2967   cmpl(result_reg, Immediate(0x80000000));
   2968   j(equal, &conv_failure, Label::kNear);
   2969   movl(result_reg, Immediate(0));
    2970   setcc(above, result_reg);  // 1 if negative (unsigned above 0x80000000).
    2971   subl(result_reg, Immediate(1));  // 0 if negative, -1 if too large.
    2972   andl(result_reg, Immediate(255));  // 0 if negative, 255 if too large.
   2973   jmp(&done, Label::kNear);
   2974   bind(&conv_failure);
   2975   Set(result_reg, 0);
   2976   ucomisd(input_reg, temp_xmm_reg);
   2977   j(below, &done, Label::kNear);
   2978   Set(result_reg, 255);
   2979   bind(&done);
   2980 }
   2981 
   2982 
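         // Converts the zero-extended uint32 in 'src' to a double in 'dst'.
         // The 'scratch' register is unused in this implementation.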
   2983 void MacroAssembler::LoadUint32(XMMRegister dst,
   2984                                 Register src,
   2985                                 XMMRegister scratch) {
   2986   if (FLAG_debug_code) {
   2987     cmpq(src, Immediate(0xffffffff));
   2988     Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
   2989   }
   2990   cvtqsi2sd(dst, src);
   2991 }
   2992 
   2993 
   2994 void MacroAssembler::LoadInstanceDescriptors(Register map,
   2995                                              Register descriptors) {
   2996   movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
   2997 }
   2998 
   2999 
   3000 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
   3001   movq(dst, FieldOperand(map, Map::kBitField3Offset));
   3002   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
   3003 }
   3004 
   3005 
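         // Loads the enum cache length from the map's bit field 3 into 'dst',
         // still smi-tagged.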
   3006 void MacroAssembler::EnumLength(Register dst, Register map) {
   3007   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
   3008   movq(dst, FieldOperand(map, Map::kBitField3Offset));
   3009   Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
   3010   and_(dst, kScratchRegister);
   3011 }
   3012 
   3013 
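         // Jumps to the 'success' code object if obj's map matches 'map';
         // otherwise falls through. With DO_SMI_CHECK, smis fall through too.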
   3014 void MacroAssembler::DispatchMap(Register obj,
   3015                                  Register unused,
   3016                                  Handle<Map> map,
   3017                                  Handle<Code> success,
   3018                                  SmiCheckType smi_check_type) {
   3019   Label fail;
   3020   if (smi_check_type == DO_SMI_CHECK) {
   3021     JumpIfSmi(obj, &fail);
   3022   }
   3023   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   3024   j(equal, success, RelocInfo::CODE_TARGET);
   3025 
   3026   bind(&fail);
   3027 }
   3028 
   3029 
   3030 void MacroAssembler::AssertNumber(Register object) {
   3031   if (emit_debug_code()) {
   3032     Label ok;
   3033     Condition is_smi = CheckSmi(object);
   3034     j(is_smi, &ok, Label::kNear);
   3035     Cmp(FieldOperand(object, HeapObject::kMapOffset),
   3036         isolate()->factory()->heap_number_map());
   3037     Check(equal, kOperandIsNotANumber);
   3038     bind(&ok);
   3039   }
   3040 }
   3041 
   3042 
   3043 void MacroAssembler::AssertNotSmi(Register object) {
   3044   if (emit_debug_code()) {
   3045     Condition is_smi = CheckSmi(object);
   3046     Check(NegateCondition(is_smi), kOperandIsASmi);
   3047   }
   3048 }
   3049 
   3050 
   3051 void MacroAssembler::AssertSmi(Register object) {
   3052   if (emit_debug_code()) {
   3053     Condition is_smi = CheckSmi(object);
   3054     Check(is_smi, kOperandIsNotASmi);
   3055   }
   3056 }
   3057 
   3058 
   3059 void MacroAssembler::AssertSmi(const Operand& object) {
   3060   if (emit_debug_code()) {
   3061     Condition is_smi = CheckSmi(object);
   3062     Check(is_smi, kOperandIsNotASmi);
   3063   }
   3064 }
   3065 
   3066 
   3067 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   3068   if (emit_debug_code()) {
   3069     ASSERT(!int32_register.is(kScratchRegister));
   3070     movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
   3071     cmpq(kScratchRegister, int32_register);
   3072     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
   3073   }
   3074 }
   3075 
   3076 
   3077 void MacroAssembler::AssertString(Register object) {
   3078   if (emit_debug_code()) {
   3079     testb(object, Immediate(kSmiTagMask));
   3080     Check(not_equal, kOperandIsASmiAndNotAString);
   3081     push(object);
   3082     movq(object, FieldOperand(object, HeapObject::kMapOffset));
   3083     CmpInstanceType(object, FIRST_NONSTRING_TYPE);
   3084     pop(object);
   3085     Check(below, kOperandIsNotAString);
   3086   }
   3087 }
   3088 
   3089 
   3090 void MacroAssembler::AssertName(Register object) {
   3091   if (emit_debug_code()) {
   3092     testb(object, Immediate(kSmiTagMask));
   3093     Check(not_equal, kOperandIsASmiAndNotAName);
   3094     push(object);
   3095     movq(object, FieldOperand(object, HeapObject::kMapOffset));
   3096     CmpInstanceType(object, LAST_NAME_TYPE);
   3097     pop(object);
   3098     Check(below_equal, kOperandIsNotAName);
   3099   }
   3100 }
   3101 
   3102 
   3103 void MacroAssembler::AssertRootValue(Register src,
   3104                                      Heap::RootListIndex root_value_index,
   3105                                      BailoutReason reason) {
   3106   if (emit_debug_code()) {
   3107     ASSERT(!src.is(kScratchRegister));
   3108     LoadRoot(kScratchRegister, root_value_index);
   3109     cmpq(src, kScratchRegister);
   3110     Check(equal, reason);
   3111   }
   3112 }
   3113 
   3114 
   3115 
   3116 Condition MacroAssembler::IsObjectStringType(Register heap_object,
   3117                                              Register map,
   3118                                              Register instance_type) {
   3119   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   3120   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
   3121   STATIC_ASSERT(kNotStringTag != 0);
   3122   testb(instance_type, Immediate(kIsNotStringMask));
   3123   return zero;
   3124 }
   3125 
   3126 
   3127 Condition MacroAssembler::IsObjectNameType(Register heap_object,
   3128                                            Register map,
   3129                                            Register instance_type) {
   3130   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   3131   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
   3132   cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
   3133   return below_equal;
   3134 }
   3135 
   3136 
   3137 void MacroAssembler::TryGetFunctionPrototype(Register function,
   3138                                              Register result,
   3139                                              Label* miss,
   3140                                              bool miss_on_bound_function) {
   3141   // Check that the receiver isn't a smi.
   3142   testl(function, Immediate(kSmiTagMask));
   3143   j(zero, miss);
   3144 
   3145   // Check that the function really is a function.
   3146   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   3147   j(not_equal, miss);
   3148 
   3149   if (miss_on_bound_function) {
   3150     movq(kScratchRegister,
   3151          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3152     // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
   3153     // field).
   3154     TestBit(FieldOperand(kScratchRegister,
   3155                          SharedFunctionInfo::kCompilerHintsOffset),
   3156             SharedFunctionInfo::kBoundFunction);
   3157     j(not_zero, miss);
   3158   }
   3159 
   3160   // Make sure that the function has an instance prototype.
   3161   Label non_instance;
   3162   testb(FieldOperand(result, Map::kBitFieldOffset),
   3163         Immediate(1 << Map::kHasNonInstancePrototype));
   3164   j(not_zero, &non_instance, Label::kNear);
   3165 
   3166   // Get the prototype or initial map from the function.
   3167   movq(result,
   3168        FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   3169 
   3170   // If the prototype or initial map is the hole, don't return it and
   3171   // simply miss the cache instead. This will allow us to allocate a
   3172   // prototype object on-demand in the runtime system.
   3173   CompareRoot(result, Heap::kTheHoleValueRootIndex);
   3174   j(equal, miss);
   3175 
   3176   // If the function does not have an initial map, we're done.
   3177   Label done;
   3178   CmpObjectType(result, MAP_TYPE, kScratchRegister);
   3179   j(not_equal, &done, Label::kNear);
   3180 
   3181   // Get the prototype from the initial map.
   3182   movq(result, FieldOperand(result, Map::kPrototypeOffset));
   3183   jmp(&done, Label::kNear);
   3184 
   3185   // Non-instance prototype: Fetch prototype from constructor field
   3186   // in initial map.
   3187   bind(&non_instance);
   3188   movq(result, FieldOperand(result, Map::kConstructorOffset));
   3189 
   3190   // All done.
   3191   bind(&done);
   3192 }
   3193 
   3194 
   3195 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   3196   if (FLAG_native_code_counters && counter->Enabled()) {
   3197     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   3198     movl(counter_operand, Immediate(value));
   3199   }
   3200 }
   3201 
   3202 
   3203 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
   3204   ASSERT(value > 0);
   3205   if (FLAG_native_code_counters && counter->Enabled()) {
   3206     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   3207     if (value == 1) {
   3208       incl(counter_operand);
   3209     } else {
   3210       addl(counter_operand, Immediate(value));
   3211     }
   3212   }
   3213 }
   3214 
   3215 
   3216 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
   3217   ASSERT(value > 0);
   3218   if (FLAG_native_code_counters && counter->Enabled()) {
   3219     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   3220     if (value == 1) {
   3221       decl(counter_operand);
   3222     } else {
   3223       subl(counter_operand, Immediate(value));
   3224     }
   3225   }
   3226 }
   3227 
   3228 
   3229 #ifdef ENABLE_DEBUGGER_SUPPORT
   3230 void MacroAssembler::DebugBreak() {
   3231   Set(rax, 0);  // No arguments.
   3232   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   3233   CEntryStub ces(1);
   3234   ASSERT(AllowThisStubCall(&ces));
   3235   Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
   3236 }
   3237 #endif  // ENABLE_DEBUGGER_SUPPORT
   3238 
   3239 
   3240 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   3241   // This macro takes the dst register to make the code more readable
   3242   // at the call sites. However, the dst register has to be rcx to
   3243   // follow the calling convention which requires the call type to be
   3244   // in rcx.
   3245   ASSERT(dst.is(rcx));
   3246   if (call_kind == CALL_AS_FUNCTION) {
   3247     LoadSmiConstant(dst, Smi::FromInt(1));
   3248   } else {
   3249     LoadSmiConstant(dst, Smi::FromInt(0));
   3250   }
   3251 }
   3252 
   3253 
   3254 void MacroAssembler::InvokeCode(Register code,
   3255                                 const ParameterCount& expected,
   3256                                 const ParameterCount& actual,
   3257                                 InvokeFlag flag,
   3258                                 const CallWrapper& call_wrapper,
   3259                                 CallKind call_kind) {
   3260   // You can't call a function without a valid frame.
   3261   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3262 
   3263   Label done;
   3264   bool definitely_mismatches = false;
   3265   InvokePrologue(expected,
   3266                  actual,
   3267                  Handle<Code>::null(),
   3268                  code,
   3269                  &done,
   3270                  &definitely_mismatches,
   3271                  flag,
   3272                  Label::kNear,
   3273                  call_wrapper,
   3274                  call_kind);
   3275   if (!definitely_mismatches) {
   3276     if (flag == CALL_FUNCTION) {
   3277       call_wrapper.BeforeCall(CallSize(code));
   3278       SetCallKind(rcx, call_kind);
   3279       call(code);
   3280       call_wrapper.AfterCall();
   3281     } else {
   3282       ASSERT(flag == JUMP_FUNCTION);
   3283       SetCallKind(rcx, call_kind);
   3284       jmp(code);
   3285     }
   3286     bind(&done);
   3287   }
   3288 }
   3289 
   3290 
   3291 void MacroAssembler::InvokeCode(Handle<Code> code,
   3292                                 const ParameterCount& expected,
   3293                                 const ParameterCount& actual,
   3294                                 RelocInfo::Mode rmode,
   3295                                 InvokeFlag flag,
   3296                                 const CallWrapper& call_wrapper,
   3297                                 CallKind call_kind) {
   3298   // You can't call a function without a valid frame.
   3299   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3300 
   3301   Label done;
   3302   bool definitely_mismatches = false;
   3303   Register dummy = rax;
   3304   InvokePrologue(expected,
   3305                  actual,
   3306                  code,
   3307                  dummy,
   3308                  &done,
   3309                  &definitely_mismatches,
   3310                  flag,
   3311                  Label::kNear,
   3312                  call_wrapper,
   3313                  call_kind);
   3314   if (!definitely_mismatches) {
   3315     if (flag == CALL_FUNCTION) {
   3316       call_wrapper.BeforeCall(CallSize(code));
   3317       SetCallKind(rcx, call_kind);
   3318       Call(code, rmode);
   3319       call_wrapper.AfterCall();
   3320     } else {
   3321       ASSERT(flag == JUMP_FUNCTION);
   3322       SetCallKind(rcx, call_kind);
   3323       Jump(code, rmode);
   3324     }
   3325     bind(&done);
   3326   }
   3327 }
   3328 
   3329 
   3330 void MacroAssembler::InvokeFunction(Register function,
   3331                                     const ParameterCount& actual,
   3332                                     InvokeFlag flag,
   3333                                     const CallWrapper& call_wrapper,
   3334                                     CallKind call_kind) {
   3335   // You can't call a function without a valid frame.
   3336   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3337 
   3338   ASSERT(function.is(rdi));
   3339   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3340   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
   3341   movsxlq(rbx,
   3342           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
   3343   // Advances rdx to the end of the Code object header, to the start of
   3344   // the executable code.
   3345   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   3346 
   3347   ParameterCount expected(rbx);
   3348   InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   3349 }
   3350 
   3351 
   3352 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   3353                                     const ParameterCount& expected,
   3354                                     const ParameterCount& actual,
   3355                                     InvokeFlag flag,
   3356                                     const CallWrapper& call_wrapper,
   3357                                     CallKind call_kind) {
   3358   // You can't call a function without a valid frame.
   3359   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3360 
   3361   // Get the function and setup the context.
   3362   LoadHeapObject(rdi, function);
   3363   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   3364 
   3365   // We call indirectly through the code field in the function to
   3366   // allow recompilation to take effect without changing any of the
   3367   // call sites.
   3368   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   3369   InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   3370 }
   3371 
   3372 
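         // Shared prologue for the invoke paths above: reconciles the expected
         // and actual argument counts. Execution continues at 'invoke' on a
         // match; otherwise the arguments adaptor trampoline is called (or
         // jumped to, for JUMP_FUNCTION).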
   3373 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
   3374                                     const ParameterCount& actual,
   3375                                     Handle<Code> code_constant,
   3376                                     Register code_register,
   3377                                     Label* done,
   3378                                     bool* definitely_mismatches,
   3379                                     InvokeFlag flag,
   3380                                     Label::Distance near_jump,
   3381                                     const CallWrapper& call_wrapper,
   3382                                     CallKind call_kind) {
   3383   bool definitely_matches = false;
   3384   *definitely_mismatches = false;
   3385   Label invoke;
   3386   if (expected.is_immediate()) {
   3387     ASSERT(actual.is_immediate());
   3388     if (expected.immediate() == actual.immediate()) {
   3389       definitely_matches = true;
   3390     } else {
   3391       Set(rax, actual.immediate());
   3392       if (expected.immediate() ==
   3393               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
   3394         // Don't worry about adapting arguments for built-ins that
    3395         // don't want that done. Skip adaptation code by making it look
   3396         // like we have a match between expected and actual number of
   3397         // arguments.
   3398         definitely_matches = true;
   3399       } else {
   3400         *definitely_mismatches = true;
   3401         Set(rbx, expected.immediate());
   3402       }
   3403     }
   3404   } else {
   3405     if (actual.is_immediate()) {
   3406       // Expected is in register, actual is immediate. This is the
   3407       // case when we invoke function values without going through the
   3408       // IC mechanism.
   3409       cmpq(expected.reg(), Immediate(actual.immediate()));
   3410       j(equal, &invoke, Label::kNear);
   3411       ASSERT(expected.reg().is(rbx));
   3412       Set(rax, actual.immediate());
   3413     } else if (!expected.reg().is(actual.reg())) {
   3414       // Both expected and actual are in (different) registers. This
   3415       // is the case when we invoke functions using call and apply.
   3416       cmpq(expected.reg(), actual.reg());
   3417       j(equal, &invoke, Label::kNear);
   3418       ASSERT(actual.reg().is(rax));
   3419       ASSERT(expected.reg().is(rbx));
   3420     }
   3421   }
   3422 
   3423   if (!definitely_matches) {
   3424     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
   3425     if (!code_constant.is_null()) {
   3426       movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
   3427       addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   3428     } else if (!code_register.is(rdx)) {
   3429       movq(rdx, code_register);
   3430     }
   3431 
   3432     if (flag == CALL_FUNCTION) {
   3433       call_wrapper.BeforeCall(CallSize(adaptor));
   3434       SetCallKind(rcx, call_kind);
   3435       Call(adaptor, RelocInfo::CODE_TARGET);
   3436       call_wrapper.AfterCall();
   3437       if (!*definitely_mismatches) {
   3438         jmp(done, near_jump);
   3439       }
   3440     } else {
   3441       SetCallKind(rcx, call_kind);
   3442       Jump(adaptor, RelocInfo::CODE_TARGET);
   3443     }
   3444     bind(&invoke);
   3445   }
   3446 }
   3447 
   3448 
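         // Builds a frame of the given type: saves the caller's rbp, then
         // pushes the context, a smi-tagged frame type marker, and the code
         // object.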
   3449 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   3450   push(rbp);
   3451   movq(rbp, rsp);
   3452   push(rsi);  // Context.
   3453   Push(Smi::FromInt(type));
   3454   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   3455   push(kScratchRegister);
   3456   if (emit_debug_code()) {
   3457     movq(kScratchRegister,
   3458          isolate()->factory()->undefined_value(),
   3459          RelocInfo::EMBEDDED_OBJECT);
   3460     cmpq(Operand(rsp, 0), kScratchRegister);
   3461     Check(not_equal, kCodeObjectNotProperlyPatched);
   3462   }
   3463 }
   3464 
   3465 
   3466 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   3467   if (emit_debug_code()) {
   3468     Move(kScratchRegister, Smi::FromInt(type));
   3469     cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
   3470     Check(equal, kStackFrameTypesMustMatch);
   3471   }
   3472   movq(rsp, rbp);
   3473   pop(rbp);
   3474 }
   3475 
   3476 
   3477 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
   3478   // Set up the frame structure on the stack.
   3479   // All constants are relative to the frame pointer of the exit frame.
   3480   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   3481   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   3482   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   3483   push(rbp);
   3484   movq(rbp, rsp);
   3485 
   3486   // Reserve room for entry stack pointer and push the code object.
   3487   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   3488   push(Immediate(0));  // Saved entry sp, patched before call.
   3489   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
    3490   push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
   3491 
   3492   // Save the frame pointer and the context in top.
   3493   if (save_rax) {
   3494     movq(r14, rax);  // Backup rax in callee-save register.
   3495   }
   3496 
   3497   Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
   3498   Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
   3499 }
   3500 
   3501 
   3502 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
   3503                                             bool save_doubles) {
   3504 #ifdef _WIN64
   3505   const int kShadowSpace = 4;
   3506   arg_stack_space += kShadowSpace;
   3507 #endif
   3508   // Optionally save all XMM registers.
   3509   if (save_doubles) {
   3510     int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
   3511         arg_stack_space * kPointerSize;
   3512     subq(rsp, Immediate(space));
   3513     int offset = -2 * kPointerSize;
   3514     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
   3515       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
   3516       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
   3517     }
   3518   } else if (arg_stack_space > 0) {
   3519     subq(rsp, Immediate(arg_stack_space * kPointerSize));
   3520   }
   3521 
   3522   // Get the required frame alignment for the OS.
   3523   const int kFrameAlignment = OS::ActivationFrameAlignment();
   3524   if (kFrameAlignment > 0) {
   3525     ASSERT(IsPowerOf2(kFrameAlignment));
   3526     ASSERT(is_int8(kFrameAlignment));
   3527     and_(rsp, Immediate(-kFrameAlignment));
   3528   }
   3529 
   3530   // Patch the saved entry sp.
   3531   movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
   3532 }
   3533 
   3534 
   3535 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   3536   EnterExitFramePrologue(true);
   3537 
   3538   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
   3539   // so it must be retained across the C-call.
   3540   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   3541   lea(r15, Operand(rbp, r14, times_pointer_size, offset));
   3542 
   3543   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
   3544 }
   3545 
   3546 
   3547 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
   3548   EnterExitFramePrologue(false);
   3549   EnterExitFrameEpilogue(arg_stack_space, false);
   3550 }
   3551 
   3552 
   3553 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   3554   // Registers:
   3555   // r15 : argv
   3556   if (save_doubles) {
   3557     int offset = -2 * kPointerSize;
   3558     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
   3559       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
   3560       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
   3561     }
   3562   }
   3563   // Get the return address from the stack and restore the frame pointer.
   3564   movq(rcx, Operand(rbp, 1 * kPointerSize));
   3565   movq(rbp, Operand(rbp, 0 * kPointerSize));
   3566 
   3567   // Drop everything up to and including the arguments and the receiver
   3568   // from the caller stack.
   3569   lea(rsp, Operand(r15, 1 * kPointerSize));
   3570 
   3571   PushReturnAddressFrom(rcx);
   3572 
   3573   LeaveExitFrameEpilogue();
   3574 }
   3575 
   3576 
   3577 void MacroAssembler::LeaveApiExitFrame() {
   3578   movq(rsp, rbp);
   3579   pop(rbp);
   3580 
   3581   LeaveExitFrameEpilogue();
   3582 }
   3583 
   3584 
   3585 void MacroAssembler::LeaveExitFrameEpilogue() {
   3586   // Restore current context from top and clear it in debug mode.
   3587   ExternalReference context_address(Isolate::kContextAddress, isolate());
   3588   Operand context_operand = ExternalOperand(context_address);
   3589   movq(rsi, context_operand);
   3590 #ifdef DEBUG
   3591   movq(context_operand, Immediate(0));
   3592 #endif
   3593 
   3594   // Clear the top frame.
   3595   ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
   3596                                        isolate());
   3597   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
   3598   movq(c_entry_fp_operand, Immediate(0));
   3599 }
   3600 
   3601 
   3602 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   3603                                             Register scratch,
   3604                                             Label* miss) {
   3605   Label same_contexts;
   3606 
   3607   ASSERT(!holder_reg.is(scratch));
   3608   ASSERT(!scratch.is(kScratchRegister));
   3609   // Load current lexical context from the stack frame.
   3610   movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
   3611 
   3612   // When generating debug code, make sure the lexical context is set.
   3613   if (emit_debug_code()) {
   3614     cmpq(scratch, Immediate(0));
   3615     Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
   3616   }
   3617   // Load the native context of the current context.
   3618   int offset =
   3619       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
   3620   movq(scratch, FieldOperand(scratch, offset));
   3621   movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
   3622 
   3623   // Check the context is a native context.
   3624   if (emit_debug_code()) {
   3625     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
   3626         isolate()->factory()->native_context_map());
   3627     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
   3628   }
   3629 
   3630   // Check if both contexts are the same.
   3631   cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   3632   j(equal, &same_contexts);
   3633 
   3634   // Compare security tokens.
   3635   // Check that the security token in the calling global object is
   3636   // compatible with the security token in the receiving global
   3637   // object.
   3638 
   3639   // Check the context is a native context.
   3640   if (emit_debug_code()) {
   3641     // Preserve original value of holder_reg.
   3642     push(holder_reg);
   3643     movq(holder_reg,
   3644          FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   3645     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
   3646     Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
   3647 
    3648     // Read the first word and compare to native_context_map().
   3649     movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
   3650     CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
   3651     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
   3652     pop(holder_reg);
   3653   }
   3654 
   3655   movq(kScratchRegister,
   3656        FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   3657   int token_offset =
   3658       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   3659   movq(scratch, FieldOperand(scratch, token_offset));
   3660   cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
   3661   j(not_equal, miss);
   3662 
   3663   bind(&same_contexts);
   3664 }
   3665 
   3666 
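         // Computes the seeded integer hash of the untagged key in r0,
         // clobbering 'scratch'. A sketch in C of the sequence below (the
         // authoritative definition is ComputeIntegerHash in utils.h):
         //   uint32_t hash = key ^ seed;
         //   hash = ~hash + (hash << 15);
         //   hash ^= hash >> 12;
         //   hash += hash << 2;
         //   hash ^= hash >> 4;
         //   hash *= 2057;
         //   hash ^= hash >> 16;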
   3667 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   3668   // First of all we assign the hash seed to scratch.
   3669   LoadRoot(scratch, Heap::kHashSeedRootIndex);
   3670   SmiToInteger32(scratch, scratch);
   3671 
   3672   // Xor original key with a seed.
   3673   xorl(r0, scratch);
   3674 
   3675   // Compute the hash code from the untagged key.  This must be kept in sync
   3676   // with ComputeIntegerHash in utils.h.
   3677   //
   3678   // hash = ~hash + (hash << 15);
   3679   movl(scratch, r0);
   3680   notl(r0);
   3681   shll(scratch, Immediate(15));
   3682   addl(r0, scratch);
   3683   // hash = hash ^ (hash >> 12);
   3684   movl(scratch, r0);
   3685   shrl(scratch, Immediate(12));
   3686   xorl(r0, scratch);
   3687   // hash = hash + (hash << 2);
   3688   leal(r0, Operand(r0, r0, times_4, 0));
   3689   // hash = hash ^ (hash >> 4);
   3690   movl(scratch, r0);
   3691   shrl(scratch, Immediate(4));
   3692   xorl(r0, scratch);
   3693   // hash = hash * 2057;
   3694   imull(r0, r0, Immediate(2057));
   3695   // hash = hash ^ (hash >> 16);
   3696   movl(scratch, r0);
   3697   shrl(scratch, Immediate(16));
   3698   xorl(r0, scratch);
   3699 }
   3700 
   3701 
   3702 
   3703 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   3704                                               Register elements,
   3705                                               Register key,
   3706                                               Register r0,
   3707                                               Register r1,
   3708                                               Register r2,
   3709                                               Register result) {
   3710   // Register use:
   3711   //
   3712   // elements - holds the slow-case elements of the receiver on entry.
   3713   //            Unchanged unless 'result' is the same register.
   3714   //
   3715   // key      - holds the smi key on entry.
   3716   //            Unchanged unless 'result' is the same register.
   3717   //
   3718   // Scratch registers:
   3719   //
   3720   // r0 - holds the untagged key on entry and holds the hash once computed.
   3721   //
   3722   // r1 - used to hold the capacity mask of the dictionary
   3723   //
   3724   // r2 - used for the index into the dictionary.
   3725   //
   3726   // result - holds the result on exit if the load succeeded.
    3727   //          Allowed to be the same as 'key' or 'elements'.
    3728   //          Unchanged on bailout so 'key' or 'elements' can be used
    3729   //          in further computation.
   3730 
   3731   Label done;
   3732 
   3733   GetNumberHash(r0, r1);
   3734 
   3735   // Compute capacity mask.
   3736   SmiToInteger32(r1, FieldOperand(elements,
   3737                                   SeededNumberDictionary::kCapacityOffset));
   3738   decl(r1);
   3739 
   3740   // Generate an unrolled loop that performs a few probes before giving up.
   3741   const int kProbes = 4;
   3742   for (int i = 0; i < kProbes; i++) {
   3743     // Use r2 for index calculations and keep the hash intact in r0.
   3744     movq(r2, r0);
   3745     // Compute the masked index: (hash + i + i * i) & mask.
   3746     if (i > 0) {
   3747       addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
   3748     }
   3749     and_(r2, r1);
   3750 
   3751     // Scale the index by multiplying by the entry size.
   3752     ASSERT(SeededNumberDictionary::kEntrySize == 3);
   3753     lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
   3754 
   3755     // Check if the key matches.
   3756     cmpq(key, FieldOperand(elements,
   3757                            r2,
   3758                            times_pointer_size,
   3759                            SeededNumberDictionary::kElementsStartOffset));
   3760     if (i != (kProbes - 1)) {
   3761       j(equal, &done);
   3762     } else {
   3763       j(not_equal, miss);
   3764     }
   3765   }
   3766 
   3767   bind(&done);
    3768   // Check that the value is a normal property.
   3769   const int kDetailsOffset =
   3770       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   3771   ASSERT_EQ(NORMAL, 0);
   3772   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
   3773        Smi::FromInt(PropertyDetails::TypeField::kMask));
   3774   j(not_zero, miss);
   3775 
   3776   // Get the value at the masked, scaled index.
   3777   const int kValueOffset =
   3778       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   3779   movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
   3780 }
   3781 
   3782 
   3783 void MacroAssembler::LoadAllocationTopHelper(Register result,
   3784                                              Register scratch,
   3785                                              AllocationFlags flags) {
   3786   ExternalReference allocation_top =
   3787       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   3788 
   3789   // Just return if allocation top is already known.
   3790   if ((flags & RESULT_CONTAINS_TOP) != 0) {
   3791     // No use of scratch if allocation top is provided.
   3792     ASSERT(!scratch.is_valid());
   3793 #ifdef DEBUG
   3794     // Assert that result actually contains top on entry.
   3795     Operand top_operand = ExternalOperand(allocation_top);
   3796     cmpq(result, top_operand);
   3797     Check(equal, kUnexpectedAllocationTop);
   3798 #endif
   3799     return;
   3800   }
   3801 
   3802   // Move address of new object to result. Use scratch register if available,
   3803   // and keep address in scratch until call to UpdateAllocationTopHelper.
   3804   if (scratch.is_valid()) {
   3805     LoadAddress(scratch, allocation_top);
   3806     movq(result, Operand(scratch, 0));
   3807   } else {
   3808     Load(result, allocation_top);
   3809   }
   3810 }
   3811 
   3812 
   3813 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
   3814                                                Register scratch,
   3815                                                AllocationFlags flags) {
   3816   if (emit_debug_code()) {
   3817     testq(result_end, Immediate(kObjectAlignmentMask));
   3818     Check(zero, kUnalignedAllocationInNewSpace);
   3819   }
   3820 
   3821   ExternalReference allocation_top =
   3822       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   3823 
   3824   // Update new top.
   3825   if (scratch.is_valid()) {
   3826     // Scratch already contains address of allocation top.
   3827     movq(Operand(scratch, 0), result_end);
   3828   } else {
   3829     Store(allocation_top, result_end);
   3830   }
   3831 }
   3832 
   3833 
   3834 void MacroAssembler::Allocate(int object_size,
   3835                               Register result,
   3836                               Register result_end,
   3837                               Register scratch,
   3838                               Label* gc_required,
   3839                               AllocationFlags flags) {
   3840   ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   3841   ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   3842   if (!FLAG_inline_new) {
   3843     if (emit_debug_code()) {
   3844       // Trash the registers to simulate an allocation failure.
   3845       movl(result, Immediate(0x7091));
   3846       if (result_end.is_valid()) {
   3847         movl(result_end, Immediate(0x7191));
   3848       }
   3849       if (scratch.is_valid()) {
   3850         movl(scratch, Immediate(0x7291));
   3851       }
   3852     }
   3853     jmp(gc_required);
   3854     return;
   3855   }
   3856   ASSERT(!result.is(result_end));
   3857 
   3858   // Load address of new object into result.
   3859   LoadAllocationTopHelper(result, scratch, flags);
   3860 
    3861   // Verify that the next allocation is double aligned when requested. This
    3862   // is a debug-only check; new-space allocations are 8-byte aligned on x64.
   3863   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
   3864     testq(result, Immediate(kDoubleAlignmentMask));
   3865     Check(zero, kAllocationIsNotDoubleAligned);
   3866   }
   3867 
   3868   // Calculate new top and bail out if new space is exhausted.
   3869   ExternalReference allocation_limit =
   3870       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   3871 
   3872   Register top_reg = result_end.is_valid() ? result_end : result;
   3873 
   3874   if (!top_reg.is(result)) {
   3875     movq(top_reg, result);
   3876   }
   3877   addq(top_reg, Immediate(object_size));
   3878   j(carry, gc_required);
   3879   Operand limit_operand = ExternalOperand(allocation_limit);
   3880   cmpq(top_reg, limit_operand);
   3881   j(above, gc_required);
   3882 
   3883   // Update allocation top.
   3884   UpdateAllocationTopHelper(top_reg, scratch, flags);
   3885 
   3886   bool tag_result = (flags & TAG_OBJECT) != 0;
   3887   if (top_reg.is(result)) {
   3888     if (tag_result) {
   3889       subq(result, Immediate(object_size - kHeapObjectTag));
   3890     } else {
   3891       subq(result, Immediate(object_size));
   3892     }
   3893   } else if (tag_result) {
   3894     // Tag the result if requested.
   3895     ASSERT(kHeapObjectTag == 1);
   3896     incq(result);
   3897   }
   3898 }
   3899 
   3900 
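         // Computes the allocation size as header_size plus element_count
         // scaled by element_size, then defers to the register-sized Allocate
         // variant below.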
   3901 void MacroAssembler::Allocate(int header_size,
   3902                               ScaleFactor element_size,
   3903                               Register element_count,
   3904                               Register result,
   3905                               Register result_end,
   3906                               Register scratch,
   3907                               Label* gc_required,
   3908                               AllocationFlags flags) {
   3909   ASSERT((flags & SIZE_IN_WORDS) == 0);
   3910   lea(result_end, Operand(element_count, element_size, header_size));
   3911   Allocate(result_end, result, result_end, scratch, gc_required, flags);
   3912 }
   3913 
   3914 
   3915 void MacroAssembler::Allocate(Register object_size,
   3916                               Register result,
   3917                               Register result_end,
   3918                               Register scratch,
   3919                               Label* gc_required,
   3920                               AllocationFlags flags) {
   3921   ASSERT((flags & SIZE_IN_WORDS) == 0);
   3922   if (!FLAG_inline_new) {
   3923     if (emit_debug_code()) {
   3924       // Trash the registers to simulate an allocation failure.
   3925       movl(result, Immediate(0x7091));
   3926       movl(result_end, Immediate(0x7191));
   3927       if (scratch.is_valid()) {
   3928         movl(scratch, Immediate(0x7291));
   3929       }
   3930       // object_size is left unchanged by this function.
   3931     }
   3932     jmp(gc_required);
   3933     return;
   3934   }
   3935   ASSERT(!result.is(result_end));
   3936 
   3937   // Load address of new object into result.
   3938   LoadAllocationTopHelper(result, scratch, flags);
   3939 
    3940   // Verify that the next allocation is double aligned when requested. This
    3941   // is a debug-only check; new-space allocations are 8-byte aligned on x64.
   3942   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
   3943     testq(result, Immediate(kDoubleAlignmentMask));
   3944     Check(zero, kAllocationIsNotDoubleAligned);
   3945   }
   3946 
   3947   // Calculate new top and bail out if new space is exhausted.
   3948   ExternalReference allocation_limit =
   3949       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   3950   if (!object_size.is(result_end)) {
   3951     movq(result_end, object_size);
   3952   }
   3953   addq(result_end, result);
   3954   j(carry, gc_required);
   3955   Operand limit_operand = ExternalOperand(allocation_limit);
   3956   cmpq(result_end, limit_operand);
   3957   j(above, gc_required);
   3958 
   3959   // Update allocation top.
   3960   UpdateAllocationTopHelper(result_end, scratch, flags);
   3961 
   3962   // Tag the result if requested.
   3963   if ((flags & TAG_OBJECT) != 0) {
   3964     addq(result, Immediate(kHeapObjectTag));
   3965   }
   3966 }
   3967 
   3968 
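         // Rewinds the new-space allocation top to 'object', undoing the most
         // recent allocation. In debug code, checks that 'object' lies below
         // the current top.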
   3969 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
   3970   ExternalReference new_space_allocation_top =
   3971       ExternalReference::new_space_allocation_top_address(isolate());
   3972 
   3973   // Make sure the object has no tag before resetting top.
   3974   and_(object, Immediate(~kHeapObjectTagMask));
   3975   Operand top_operand = ExternalOperand(new_space_allocation_top);
   3976 #ifdef DEBUG
   3977   cmpq(object, top_operand);
   3978   Check(below, kUndoAllocationOfNonAllocatedMemory);
   3979 #endif
   3980   movq(top_operand, object);
   3981 }
   3982 
   3983 
   3984 void MacroAssembler::AllocateHeapNumber(Register result,
   3985                                         Register scratch,
   3986                                         Label* gc_required) {
   3987   // Allocate heap number in new space.
   3988   Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
   3989 
   3990   // Set the map.
   3991   LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
   3992   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3993 }
   3994 
   3995 
   3996 void MacroAssembler::AllocateTwoByteString(Register result,
   3997                                            Register length,
   3998                                            Register scratch1,
   3999                                            Register scratch2,
   4000                                            Register scratch3,
   4001                                            Label* gc_required) {
   4002   // Calculate the number of bytes needed for the characters in the string while
   4003   // observing object alignment.
   4004   const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
   4005                                kObjectAlignmentMask;
   4006   ASSERT(kShortSize == 2);
    4007   // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
   4008   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
   4009                 kHeaderAlignment));
   4010   and_(scratch1, Immediate(~kObjectAlignmentMask));
   4011   if (kHeaderAlignment > 0) {
   4012     subq(scratch1, Immediate(kHeaderAlignment));
   4013   }
   4014 
   4015   // Allocate two byte string in new space.
   4016   Allocate(SeqTwoByteString::kHeaderSize,
   4017            times_1,
   4018            scratch1,
   4019            result,
   4020            scratch2,
   4021            scratch3,
   4022            gc_required,
   4023            TAG_OBJECT);
   4024 
   4025   // Set the map, length and hash field.
   4026   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
   4027   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   4028   Integer32ToSmi(scratch1, length);
   4029   movq(FieldOperand(result, String::kLengthOffset), scratch1);
   4030   movq(FieldOperand(result, String::kHashFieldOffset),
   4031        Immediate(String::kEmptyHashField));
   4032 }
   4033 
   4034 
   4035 void MacroAssembler::AllocateAsciiString(Register result,
   4036                                          Register length,
   4037                                          Register scratch1,
   4038                                          Register scratch2,
   4039                                          Register scratch3,
   4040                                          Label* gc_required) {
   4041   // Calculate the number of bytes needed for the characters in the string while
   4042   // observing object alignment.
   4043   const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
   4044                                kObjectAlignmentMask;
   4045   movl(scratch1, length);
   4046   ASSERT(kCharSize == 1);
   4047   addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
   4048   and_(scratch1, Immediate(~kObjectAlignmentMask));
   4049   if (kHeaderAlignment > 0) {
   4050     subq(scratch1, Immediate(kHeaderAlignment));
   4051   }
   4052 
   4053   // Allocate ASCII string in new space.
   4054   Allocate(SeqOneByteString::kHeaderSize,
   4055            times_1,
   4056            scratch1,
   4057            result,
   4058            scratch2,
   4059            scratch3,
   4060            gc_required,
   4061            TAG_OBJECT);
   4062 
   4063   // Set the map, length and hash field.
   4064   LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
   4065   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   4066   Integer32ToSmi(scratch1, length);
   4067   movq(FieldOperand(result, String::kLengthOffset), scratch1);
   4068   movq(FieldOperand(result, String::kHashFieldOffset),
   4069        Immediate(String::kEmptyHashField));
   4070 }
   4071 
   4072 
   4073 void MacroAssembler::AllocateTwoByteConsString(Register result,
    4074                                                Register scratch1,
    4075                                                Register scratch2,
    4076                                                Label* gc_required) {
    4077   // Allocate a cons string in new space.
   4078   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   4079            TAG_OBJECT);
   4080 
   4081   // Set the map. The other fields are left uninitialized.
   4082   LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
   4083   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   4084 }
   4085 
   4086 
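         // Allocates an ascii cons string. When the new-space high promotion
         // mode is active, the string is pretenured into old pointer space.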
   4087 void MacroAssembler::AllocateAsciiConsString(Register result,
   4088                                              Register scratch1,
   4089                                              Register scratch2,
   4090                                              Label* gc_required) {
   4091   Label allocate_new_space, install_map;
   4092   AllocationFlags flags = TAG_OBJECT;
   4093 
   4094   ExternalReference high_promotion_mode = ExternalReference::
   4095       new_space_high_promotion_mode_active_address(isolate());
   4096 
   4097   Load(scratch1, high_promotion_mode);
   4098   testb(scratch1, Immediate(1));
   4099   j(zero, &allocate_new_space);
   4100   Allocate(ConsString::kSize,
   4101            result,
   4102            scratch1,
   4103            scratch2,
   4104            gc_required,
   4105            static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
   4106 
   4107   jmp(&install_map);
   4108 
   4109   bind(&allocate_new_space);
   4110   Allocate(ConsString::kSize,
   4111            result,
   4112            scratch1,
   4113            scratch2,
   4114            gc_required,
   4115            flags);
   4116 
   4117   bind(&install_map);
   4118 
   4119   // Set the map. The other fields are left uninitialized.
   4120   LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
   4121   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   4122 }
   4123 
   4124 
   4125 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
    4126                                                  Register scratch1,
    4127                                                  Register scratch2,
    4128                                                  Label* gc_required) {
    4129   // Allocate a sliced string in new space.
   4130   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   4131            TAG_OBJECT);
   4132 
   4133   // Set the map. The other fields are left uninitialized.
   4134   LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
   4135   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   4136 }
   4137 
   4138 
   4139 void MacroAssembler::AllocateAsciiSlicedString(Register result,
   4140                                                Register scratch1,
   4141                                                Register scratch2,
   4142                                                Label* gc_required) {
    4143   // Allocate a sliced string in new space.
   4144   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   4145            TAG_OBJECT);
   4146 
   4147   // Set the map. The other fields are left uninitialized.
   4148   LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
   4149   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   4150 }
   4151 
   4152 
   4153 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
    4154 // long or aligned copies.  Destination is incremented by length; the
    4155 // source, length and scratch registers are clobbered.
    4156 // A simpler loop is faster on small copies, but slower on large
    4157 // ones.
    4158 // The cld() instruction must have been emitted, to set the direction
    4159 // flag, before calling this function.
   4160 void MacroAssembler::CopyBytes(Register destination,
   4161                                Register source,
   4162                                Register length,
   4163                                int min_length,
   4164                                Register scratch) {
   4165   ASSERT(min_length >= 0);
   4166   if (emit_debug_code()) {
   4167     cmpl(length, Immediate(min_length));
   4168     Assert(greater_equal, kInvalidMinLength);
   4169   }
   4170   Label loop, done, short_string, short_loop;
   4171 
   4172   const int kLongStringLimit = 20;
   4173   if (min_length <= kLongStringLimit) {
   4174     cmpl(length, Immediate(kLongStringLimit));
   4175     j(less_equal, &short_string);
   4176   }
   4177 
   4178   ASSERT(source.is(rsi));
   4179   ASSERT(destination.is(rdi));
   4180   ASSERT(length.is(rcx));
   4181 
   4182   // Because source is 8-byte aligned in our uses of this function,
   4183   // we keep source aligned for the rep movs operation by copying the odd bytes
   4184   // at the end of the ranges.
   4185   movq(scratch, length);
   4186   shrl(length, Immediate(kPointerSizeLog2));
   4187   repmovsq();
    4188   // Copy the tail as one word ending at the range end (it may overlap).
   4189   andl(scratch, Immediate(kPointerSize - 1));
   4190   movq(length, Operand(source, scratch, times_1, -kPointerSize));
   4191   movq(Operand(destination, scratch, times_1, -kPointerSize), length);
   4192   addq(destination, scratch);
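           // For example, with length == 11 the rep movsq copies bytes 0..7 and
           // the word move above copies bytes 3..10, rewriting bytes 3..7.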
   4193 
   4194   if (min_length <= kLongStringLimit) {
   4195     jmp(&done);
   4196 
   4197     bind(&short_string);
   4198     if (min_length == 0) {
   4199       testl(length, length);
   4200       j(zero, &done);
   4201     }
   4202     lea(scratch, Operand(destination, length, times_1, 0));
   4203 
   4204     bind(&short_loop);
   4205     movb(length, Operand(source, 0));
   4206     movb(Operand(destination, 0), length);
   4207     incq(source);
   4208     incq(destination);
   4209     cmpq(destination, scratch);
   4210     j(not_equal, &short_loop);
   4211 
   4212     bind(&done);
   4213   }
   4214 }
   4215 
   4216 
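         // Stores 'filler' into each pointer-sized field in the half-open
         // range [start_offset, end_offset); 'start_offset' is clobbered.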
   4217 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
   4218                                                 Register end_offset,
   4219                                                 Register filler) {
   4220   Label loop, entry;
   4221   jmp(&entry);
   4222   bind(&loop);
   4223   movq(Operand(start_offset, 0), filler);
   4224   addq(start_offset, Immediate(kPointerSize));
   4225   bind(&entry);
   4226   cmpq(start_offset, end_offset);
   4227   j(less, &loop);
   4228 }
   4229 
   4230 
   4231 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   4232   if (context_chain_length > 0) {
   4233     // Move up the chain of contexts to the context containing the slot.
   4234     movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   4235     for (int i = 1; i < context_chain_length; i++) {
   4236       movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   4237     }
   4238   } else {
   4239     // Slot is in the current function context.  Move it into the
   4240     // destination register in case we store into it (the write barrier
   4241     // cannot be allowed to destroy the context in rsi).
   4242     movq(dst, rsi);
   4243   }
   4244 
   4245   // We should not have found a with context by walking the context
   4246   // chain (i.e., the static scope chain and runtime context chain do
   4247   // not agree).  A variable occurring in such a scope should have
   4248   // slot type LOOKUP and not CONTEXT.
   4249   if (emit_debug_code()) {
   4250     CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
   4251                 Heap::kWithContextMapRootIndex);
   4252     Check(not_equal, kVariableResolvedToWithContext);
   4253   }
   4254 }
   4255 
   4256 
   4257 void MacroAssembler::LoadTransitionedArrayMapConditional(
   4258     ElementsKind expected_kind,
   4259     ElementsKind transitioned_kind,
   4260     Register map_in_out,
   4261     Register scratch,
   4262     Label* no_map_match) {
   4263   // Load the global or builtins object from the current context.
   4264   movq(scratch,
   4265        Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   4266   movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
   4267 
   4268   // Check that the function's map is the same as the expected cached map.
   4269   movq(scratch, Operand(scratch,
   4270                         Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
   4271 
   4272   int offset = expected_kind * kPointerSize +
   4273       FixedArrayBase::kHeaderSize;
   4274   cmpq(map_in_out, FieldOperand(scratch, offset));
   4275   j(not_equal, no_map_match);
   4276 
   4277   // Use the transitioned cached map.
   4278   offset = transitioned_kind * kPointerSize +
   4279       FixedArrayBase::kHeaderSize;
   4280   movq(map_in_out, FieldOperand(scratch, offset));
   4281 }
   4282 
   4283 
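         // Loads the initial array map of 'function_in' into 'map_out',
         // transitioning it away from FAST_SMI_ELEMENTS when holes or
         // non-smi elements must be representable.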
   4284 void MacroAssembler::LoadInitialArrayMap(
   4285     Register function_in, Register scratch,
   4286     Register map_out, bool can_have_holes) {
   4287   ASSERT(!function_in.is(map_out));
   4288   Label done;
   4289   movq(map_out, FieldOperand(function_in,
   4290                              JSFunction::kPrototypeOrInitialMapOffset));
   4291   if (!FLAG_smi_only_arrays) {
   4292     ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
   4293     LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
   4294                                         kind,
   4295                                         map_out,
   4296                                         scratch,
   4297                                         &done);
   4298   } else if (can_have_holes) {
   4299     LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
   4300                                         FAST_HOLEY_SMI_ELEMENTS,
   4301                                         map_out,
   4302                                         scratch,
   4303                                         &done);
   4304   }
   4305   bind(&done);
   4306 }
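
// Reading of the two branches above: with smi-only arrays disabled, the
// initial (FAST_SMI_ELEMENTS) map is swapped up front for a FAST_ELEMENTS
// or FAST_HOLEY_ELEMENTS map; with them enabled, only the holey variant
// needs an eager transition.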

#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif
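// For reference: Win64 passes the first four integer arguments in rcx, rdx,
// r8 and r9; the System V AMD64 ABI (Linux/Mac) passes six, in rdi, rsi,
// rdx, rcx, r8 and r9.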

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movq(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  movq(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadArrayFunction(Register function) {
  movq(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
  movq(function,
       Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // Under the AMD64 ABI (Linux/Mac) the first six arguments are passed in
  // registers and the caller does not reserve stack slots for them.
  ASSERT(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}
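
// Worked examples: num_arguments == 2 yields 4 slots on Win64 (the shadow
// space for the four register arguments) and 0 slots under the AMD64 ABI;
// num_arguments == 7 yields 7 slots on Win64 and 1 under the AMD64 ABI.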


void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = OS::ActivationFrameAlignment();
  ASSERT(frame_alignment != 0);
  ASSERT(num_arguments >= 0);

  // Align rsp and allocate space for the argument slots and the old rsp,
  // which is saved in the slot just above the argument area.
  movq(kScratchRegister, rsp);
  ASSERT(IsPowerOf2(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
  and_(rsp, Immediate(-frame_alignment));
  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
}
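
// Numeric example: with rsp == 0x...f8, frame_alignment == 16 and two stack
// argument slots, rsp drops by 3 * kPointerSize to 0x...e0 (already 16-byte
// aligned here, so the and_ is a no-op) and the old rsp is stored at
// rsp + 16, just above the two argument slots.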


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  ASSERT(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  ASSERT(OS::ActivationFrameAlignment() != 0);
  ASSERT(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
}
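
// Note on the final movq: it reloads rsp from the slot just above the
// argument area, i.e. the old rsp that PrepareCallCFunction saved there,
// undoing both the allocation and the alignment in a single move.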


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to be able to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
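
// Sketch of typical use (assuming the usual masm() accessor on CodePatcher):
//   {
//     CodePatcher patcher(address, 7);   // patch exactly 7 bytes in place
//     patcher.masm()->movq(rax, rbx);    // emit replacement instructions
//   }  // destructor flushes the icache and asserts 7 bytes were written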


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movq(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
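
// Example of the masking step (assuming 1 MB pages, i.e. kPageAlignmentMask
// is 0xfffff): for object == 0x12345678 the and_ leaves scratch holding
// 0x12300000, the MemoryChunk header of the containing page, whose flags
// word is then tested.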


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Move(scratch, map);
    movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    SmiToInteger32(scratch, scratch);
    and_(scratch, Immediate(Map::Deprecated::kMask));
    j(not_zero, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movq(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
  // Note that we are using a 4-byte aligned 8-byte load.
  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpq(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}
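
// Bit-level example: if mask == 0b0100, the lea computes mask + 2 * mask ==
// 0b1100, a mask covering both marking bits. After the and_, rcx == mask
// holds exactly when the first bit is 1 and the second is 0, which is the
// "10" (black) pattern asserted above.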


// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}
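
// The single testb above folds two checks into one, per the ASSERTs: the
// 0x80 bit set means "not a string at all", the 0x01 bit set means
// "indirect (cons or sliced) string"; either one sends the object to
// not_data_object.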


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movq(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movq(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  and_(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addq(bitmap_reg, rcx);
  movq(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}
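
// Worked example (assuming 32-bit bitmap cells, so kBitsPerCellLog2 == 5 and
// kBytesPerCellLog2 == 2, with kPointerSizeLog2 == 3, giving shift == 6):
// for an address at page offset 0x1040, the word index is 0x1040 >> 3 ==
// 520, so the cell byte offset is (0x1040 >> 6) & ~3 == 0x40 and the bit
// index is 520 & 31 == 8; mask_reg ends up holding 1 << 8.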


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl.  May overflow making the check conservative.
    addq(mask_scratch, mask_scratch);
    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  movq(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movq(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movq(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  addq(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));
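
  // Worked example of the size computation above (assuming x64's 32-bit smi
  // shift, i.e. kSmiTagSize + kSmiShiftSize == 32): for a one-byte string of
  // length 5, the and_/xor_/addq sequence leaves 4 in length, the smi length
  // field is 5 << 32, so the product is 20 << 32; shifting right by 34
  // yields 5 character bytes, and the last two instructions round
  // SeqString::kHeaderSize + 5 up to kObjectAlignment.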

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movq(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  cmpq(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpq(rcx, null_value);
  j(not_equal, &next);
}
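
// Shape of the loop above: rcx walks the prototype chain starting at the
// receiver (in rax), rbx holds the current map and rdx its enum length. An
// invalid enum cache on the receiver, a non-empty cache on a prototype, or
// a non-empty elements array all bail out to call_runtime; reaching
// null_value means the fast path succeeds.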

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg) {
  Label no_memento_available;
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  movq(kScratchRegister, new_space_start);
  cmpq(scratch_reg, kScratchRegister);
  j(less, &no_memento_available);
  cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, &no_memento_available);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
  bind(&no_memento_available);
}
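
// How callers read the result: scratch_reg is pointed just past where an
// AllocationMemento would sit directly behind the array; after the bounds
// checks against new space, the CompareRoot leaves the zero flag set only
// if the memento map was found, so a caller would typically follow this
// with j(equal, &memento_found). Both early bail-outs reach
// no_memento_available with the zero flag clear.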


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64