// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "codegen.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"
#include "heap.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate->heap()->roots_array_start());
  intptr_t delta = other.address() - roots_register_value;
  return delta;
}
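
// A sketch of the arithmetic above (illustrative numbers, not from a real
// heap): kRootRegister holds roots_array_start + kRootRegisterBias, where
// the bias (128 on x64 at the time of writing) keeps more of the root list
// reachable through a one-byte displacement. With roots_array_start ==
// 0x1000 and other.address() == 0x1040, the delta is
// 0x1040 - (0x1000 + 128) == -64, so the reference is addressable as
// Operand(kRootRegister, -64).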


Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(target, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  movq(scratch, target);
  return Operand(scratch, 0);
}


void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(source, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    movq(kScratchRegister, source);
    movq(destination, Operand(kScratchRegister, 0));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(destination, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    movq(kScratchRegister, destination);
    movq(Operand(kScratchRegister, 0), source);
  }
}


void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(source, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  movq(destination, source);
}

int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    intptr_t delta = RootRegisterDelta(source, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      // Operand is lea(scratch, Operand(kRootRegister, delta));
      // Opcodes: REX.W 8D ModRM Disp8/Disp32 - 4 or 7 bytes.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movq(destination, source);
  return 10;
}
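
// For reference, the instruction encodings that LoadAddressSize accounts
// for (standard x64 encodings, spelled out here as a sketch rather than
// generated output):
//   lea r, [kRootRegister + disp8]   REX.W 8D ModRM disp8   -> 4 bytes
//   lea r, [kRootRegister + disp32]  REX.W 8D ModRM disp32  -> 7 bytes
//   movq r, imm64                    REX.W B8+r imm64       -> 10 bytes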


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movq(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}
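
// Worked example of the displacement above (assuming 8-byte pointers, so
// kPointerSizeLog2 == 3, and the bias of 128): root index 5 yields the
// operand [kRootRegister + 5 * 8 - 128] == [kRootRegister - 88], which
// encodes with a one-byte displacement thanks to the bias.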


void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  ASSERT(root_array_available_);
  movq(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  cmpq(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpq(with, kScratchRegister);
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (FLAG_debug_code) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movq(Operand(scratch, 0), addr);
  // Increment buffer top.
  addq(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}
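
// The single testq above stands in for a compare against the buffer limit:
// the store buffer is aligned so that once the incremented top pointer runs
// past the end of the buffer, its address has
// StoreBuffer::kStoreBufferOverflowBit set (see store-buffer.h; this note
// is a summary of that scheme, not authoritative documentation).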


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (Serializer::enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address.  We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      and_(scratch, kScratchRegister);
    } else {
      movq(scratch, ExternalReference::new_space_mask(isolate()));
      and_(scratch, object);
    }
    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpq(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
    if (scratch.is(object)) {
      addq(scratch, kScratchRegister);
    } else {
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
    j(cc, branch, distance);
  }
}
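
// Both paths test the same predicate: an object is in new space exactly when
// (object & HEAP->NewSpaceMask()) == new_space_start, or equivalently when
// (object - new_space_start) & mask == 0, since the space is size-aligned.
// Callers therefore pass cc == equal to branch when the object is in new
// space, and cc == not_equal for the opposite test.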


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!value.is(rsi) && !dst.is(rsi));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}


void MacroAssembler::RecordWriteArray(Register object,
                                      Register value,
                                      Register index,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  lea(dst, Operand(object, index, times_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}


void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!value.is(rsi) && !address.is(rsi));

  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  if (emit_debug_code()) {
    AbortIfSmi(object);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (FLAG_debug_code) {
    Label ok;
    cmpq(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}
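
// Summary of the fast path above: the barrier is skipped as soon as either
// page test fails. The stub only needs to run when the stored value sits on
// a page whose pointers are interesting to the collector *and* the host
// object sits on a page that must record outgoing pointers; only then does
// RecordWriteStub update the remembered set / marking state.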


void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (emit_debug_code()) Check(cc, msg);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(msg);
  // Control will not return here.
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems; however, msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  push(rax);
  movq(kScratchRegister, p0, RelocInfo::NONE);
  push(kScratchRegister);
  movq(kScratchRegister,
       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
       RelocInfo::NONE);
  push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // Control will not return here.
  int3();
}


void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addq(rsp, Immediate(num_arguments * kPointerSize));
  }
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. Even if we subsequently go to
  // the slow case, converting the key to a smi is always valid.
  // key: string key
  // hash: key's hash field, including its array index value.
  and_(hash, Immediate(String::kArrayIndexValueMask));
  shr(hash, Immediate(String::kHashShift));
  // Here we actually clobber the key, which will be used if we call into
  // the runtime later. However, as the new key is the numeric value of a
  // string key, there is no difference in using either key.
  Integer32ToSmi(index, hash);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(id), num_arguments);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  Set(rax, function->nargs);
  LoadAddress(rbx, ExternalReference(function, isolate()));
  CEntryStub ces(1, kSaveFPRegs);
  CallStub(&ces);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(f->result_size);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  ASSERT(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
#ifdef _WIN64
  // We need to prepare a slot for the result handle on the stack and put
  // a pointer to it into the first argument register.
  EnterApiExitFrame(arg_stack_space + 1);

  // rcx must be used to pass the pointer to the return value slot.
  lea(rcx, StackSpaceOperand(arg_stack_space));
#else
  EnterApiExitFrame(arg_stack_space);
#endif
}


void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
                                              int stack_space) {
  Label empty_result;
  Label prologue;
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label write_back;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(),
      next_address);
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(),
      next_address);
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  movq(base_reg, next_address);
  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));
  // Call the api function!
  movq(rax, reinterpret_cast<int64_t>(function_address),
       RelocInfo::RUNTIME_ENTRY);
  call(rax);

#ifdef _WIN64
  // rax holds a pointer to the v8::Handle; unpack it.
  movq(rax, Operand(rax, 0));
#endif
  // Check if the result handle holds 0.
  testq(rax, rax);
  j(zero, &empty_result);
  // It was non-zero.  Dereference to get the result value.
  movq(rax, Operand(rax, 0));
  bind(&prologue);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  movq(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);

  LeaveApiExitFrame();
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);

  bind(&empty_result);
  // It was zero; the result is undefined.
  Move(rax, factory->undefined_value());
  jmp(&prologue);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movq(prev_limit_reg, rax);
#ifdef _WIN64
  LoadAddress(rcx, ExternalReference::isolate_address());
#else
  LoadAddress(rdi, ExternalReference::isolate_address());
#endif
  LoadAddress(rax,
              ExternalReference::delete_handle_scope_extensions(isolate()));
  call(rax);
  movq(rax, prev_limit_reg);
  jmp(&leave_exit_frame);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movq(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}


#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      push(reg);
    }
  }
  // r12 through r15 are callee-saved on all platforms.
  if (fp_mode == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}


void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pop(reg);
    }
  }
}


void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x, RelocInfo::NONE);
  }
}
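
// Immediate-width selection in Set, spelled out: a value that fits in an
// unsigned 32 bits can use movl, because a 32-bit write zero-extends into
// the full 64-bit register (the shortest encoding); a negative value that
// fits in 32 bits uses movq with a sign-extended imm32; anything else needs
// the full 10-byte imm64 load.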

void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    Set(kScratchRegister, x);
    movq(dst, kScratchRegister);
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}
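
// GetSmiConstant leans on kSmiConstantRegister, a register that generated
// code keeps loaded with Smi::FromInt(1) (the debug check at the top of
// LoadSmiConstant below verifies this invariant). Zero is materialized with
// xorl instead, since it needs no constant at all.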

void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    movq(dst,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE);
    cmpq(dst, kSmiConstantRegister);
    if (allow_stub_calls()) {
      Assert(equal, "Uninitialized kSmiConstantRegister");
    } else {
      Label ok;
      j(equal, &ok, Label::kNear);
      int3();
      bind(&ok);
    }
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movq(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
      return;
  }
  if (negative) {
    neg(dst);
  }
}
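
// The switch above builds small smi constants from kSmiConstantRegister with
// a single lea. Worked example, assuming the x64 smi layout (payload in the
// upper 32 bits): for uvalue == 5,
//   lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0))
// computes 5 * Smi::FromInt(1) == 5 << 32 == Smi::FromInt(5); scaling a smi
// by an untagged integer preserves the tag. Negative values are negated
// afterwards.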


void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shl(dst, Immediate(kSmiShift));
}
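
// On x64, kSmiShift is 32: a smi keeps its 32-bit payload in the upper half
// of the word, and the low 32 bits (including the tag bit, which is 0) are
// all zero. Integer32ToSmi can therefore use movl, which clears the upper
// half, followed by a 32-bit left shift; e.g. 7 becomes 0x0000000700000000.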


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    if (allow_stub_calls()) {
      Abort("Integer32ToSmiField writing to non-smi location");
    } else {
      int3();
    }
    bind(&ok);
  }
  ASSERT(kSmiShift % kBitsPerByte == 0);
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  shr(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  sar(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
}
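
// The kSmiShift / kBitsPerByte (== 4) offsets used by the Operand variants
// above exploit the same layout: the payload of a smi stored in memory is
// just the 32-bit word four bytes in, so a smi field can be read with a
// plain movl (or movsxlq for a sign-extended 64-bit result) and written
// without any shifting.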


void MacroAssembler::SmiTest(Register src) {
  testq(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  if (emit_debug_code()) {
    AbortIfNotSmi(smi1);
    AbortIfNotSmi(smi2);
  }
  cmpq(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
  }
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testq(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpq(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
    AbortIfNotSmi(src);
  }
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
    AbortIfNotSmi(src);
  }
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
  }
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  ASSERT(!dst.AddressUsesRegister(smi_reg));
  cmpq(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (power < kSmiShift) {
    sar(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shl(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  ASSERT((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shr(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movq(kScratchRegister, src1);
    or_(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    or_(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movq(kScratchRegister, src);
  rol(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}
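
// Why the rol trick works: rotating left by one moves the sign bit (bit 63)
// into bit 0 and the smi tag bit into bit 1, so testb(..., Immediate(3))
// checks "negative or not a smi" with a single flag-setting instruction;
// zero means a non-negative smi.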


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  leal(kScratchRegister, Operand(first, second, times_1, 0));
  testb(kScratchRegister, Immediate(0x03));
  return zero;
}
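
// This relies on the x64 tagging asserted above: a smi ends in ...00 (in
// fact its low 32 bits are all zero) while a heap object pointer ends in
// ...01. Adding the two values therefore yields low bits 00 only when both
// are smis; smi + object gives 01 and object + object gives 10, so
// testb(..., Immediate(0x03)) is zero exactly when both are smis.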


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movq(kScratchRegister, first);
  or_(kScratchRegister, second);
  rol(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpq(src, kSmiConstantRegister);
  return overflow;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can always be converted to a smi.
  return always;
}


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  // An unsigned 32-bit integer value is valid as long as the high bit
  // is not set.
  testl(src, src);
  return positive;
}


void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}


void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}


void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       Smi* constant,
                                       Label* on_not_smi_result,
                                       Label::Distance near_jump) {
  // Does not assume that src is a smi.
  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));

  JumpIfNotSmi(src, on_not_smi_result, near_jump);
  Register tmp = (dst.is(src) ? kScratchRegister : dst);
  LoadSmiConstant(tmp, constant);
  addq(tmp, src);
  j(overflow, on_not_smi_result, near_jump);
  if (dst.is(src)) {
    movq(dst, tmp);
  }
}


void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
    return;
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    switch (constant->value()) {
      case 1:
        addq(dst, kSmiConstantRegister);
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        Register constant_reg = GetSmiConstant(constant);
        addq(dst, constant_reg);
        return;
    }
  } else {
    switch (constant->value()) {
      case 1:
        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        LoadSmiConstant(dst, constant);
        addq(dst, src);
        return;
    }
  }
}


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
  }
}


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    LoadSmiConstant(kScratchRegister, constant);
    addq(kScratchRegister, src);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result, near_jump);
  }
}


   1424 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   1425   if (constant->value() == 0) {
   1426     if (!dst.is(src)) {
   1427       movq(dst, src);
   1428     }
   1429   } else if (dst.is(src)) {
   1430     ASSERT(!dst.is(kScratchRegister));
   1431     Register constant_reg = GetSmiConstant(constant);
   1432     subq(dst, constant_reg);
   1433   } else {
   1434     if (constant->value() == Smi::kMinValue) {
   1435       LoadSmiConstant(dst, constant);
   1436       // Adding and subtracting the min-value gives the same result, it only
   1437       // differs on the overflow bit, which we don't check here.
   1438       addq(dst, src);
   1439     } else {
   1440       // Subtract by adding the negation.
   1441       LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
   1442       addq(dst, src);
   1443     }
   1444   }
   1445 }
   1446 
   1447 
   1448 void MacroAssembler::SmiSubConstant(Register dst,
   1449                                     Register src,
   1450                                     Smi* constant,
   1451                                     Label* on_not_smi_result,
   1452                                     Label::Distance near_jump) {
   1453   if (constant->value() == 0) {
   1454     if (!dst.is(src)) {
   1455       movq(dst, src);
   1456     }
   1457   } else if (dst.is(src)) {
   1458     ASSERT(!dst.is(kScratchRegister));
   1459     if (constant->value() == Smi::kMinValue) {
   1460       // Subtracting min-value from any non-negative value will overflow.
   1461       // We test the non-negativeness before doing the subtraction.
   1462       testq(src, src);
   1463       j(not_sign, on_not_smi_result, near_jump);
   1464       LoadSmiConstant(kScratchRegister, constant);
   1465       subq(dst, kScratchRegister);
   1466     } else {
   1467       // Subtract by adding the negation.
   1468       LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
   1469       addq(kScratchRegister, dst);
   1470       j(overflow, on_not_smi_result, near_jump);
   1471       movq(dst, kScratchRegister);
   1472     }
   1473   } else {
   1474     if (constant->value() == Smi::kMinValue) {
   1475       // Subtracting min-value from any non-negative value will overflow.
    1476       // We test for non-negativity before doing the subtraction.
   1477       testq(src, src);
   1478       j(not_sign, on_not_smi_result, near_jump);
   1479       LoadSmiConstant(dst, constant);
    1480       // Adding and subtracting the min-value gives the same result; they
    1481       // differ only in the overflow flag, which we don't check here.
   1482       addq(dst, src);
   1483     } else {
   1484       // Subtract by adding the negation.
   1485       LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
   1486       addq(dst, src);
   1487       j(overflow, on_not_smi_result, near_jump);
   1488     }
   1489   }
   1490 }
   1491 
   1492 
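         // Negating a smi in place can fail for exactly two inputs: 0, because
         // JavaScript distinguishes -0 from +0 and -0 is not a smi, and
         // Smi::kMinValue, whose negation overflows back to itself. These are
         // the only 64-bit values x with -x == x (mod 2^64), so a single cmpq
         // of the negated value against the original catches both cases.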
   1493 void MacroAssembler::SmiNeg(Register dst,
   1494                             Register src,
   1495                             Label* on_smi_result,
   1496                             Label::Distance near_jump) {
   1497   if (dst.is(src)) {
   1498     ASSERT(!dst.is(kScratchRegister));
   1499     movq(kScratchRegister, src);
   1500     neg(dst);  // Low 32 bits are retained as zero by negation.
   1501     // Test if result is zero or Smi::kMinValue.
   1502     cmpq(dst, kScratchRegister);
   1503     j(not_equal, on_smi_result, near_jump);
   1504     movq(src, kScratchRegister);
   1505   } else {
   1506     movq(dst, src);
   1507     neg(dst);
   1508     cmpq(dst, src);
   1509     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
   1510     j(not_equal, on_smi_result, near_jump);
   1511   }
   1512 }
   1513 
   1514 
   1515 void MacroAssembler::SmiAdd(Register dst,
   1516                             Register src1,
   1517                             Register src2,
   1518                             Label* on_not_smi_result,
   1519                             Label::Distance near_jump) {
   1520   ASSERT_NOT_NULL(on_not_smi_result);
   1521   ASSERT(!dst.is(src2));
   1522   if (dst.is(src1)) {
   1523     movq(kScratchRegister, src1);
   1524     addq(kScratchRegister, src2);
   1525     j(overflow, on_not_smi_result, near_jump);
   1526     movq(dst, kScratchRegister);
   1527   } else {
   1528     movq(dst, src1);
   1529     addq(dst, src2);
   1530     j(overflow, on_not_smi_result, near_jump);
   1531   }
   1532 }
   1533 
   1534 
   1535 void MacroAssembler::SmiAdd(Register dst,
   1536                             Register src1,
   1537                             const Operand& src2,
   1538                             Label* on_not_smi_result,
   1539                             Label::Distance near_jump) {
   1540   ASSERT_NOT_NULL(on_not_smi_result);
   1541   if (dst.is(src1)) {
   1542     movq(kScratchRegister, src1);
   1543     addq(kScratchRegister, src2);
   1544     j(overflow, on_not_smi_result, near_jump);
   1545     movq(dst, kScratchRegister);
   1546   } else {
   1547     ASSERT(!src2.AddressUsesRegister(dst));
   1548     movq(dst, src1);
   1549     addq(dst, src2);
   1550     j(overflow, on_not_smi_result, near_jump);
   1551   }
   1552 }
   1553 
   1554 
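         // In the variant below, the debug-mode add into kScratchRegister exists
         // only to set the overflow flag for the Check; the result itself is
         // produced with lea, which computes src1 + src2 in one instruction
         // without clobbering either source register.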
   1555 void MacroAssembler::SmiAdd(Register dst,
   1556                             Register src1,
   1557                             Register src2) {
   1558   // No overflow checking. Use only when it's known that
   1559   // overflowing is impossible.
   1560   if (!dst.is(src1)) {
   1561     if (emit_debug_code()) {
   1562       movq(kScratchRegister, src1);
   1563       addq(kScratchRegister, src2);
   1564       Check(no_overflow, "Smi addition overflow");
   1565     }
   1566     lea(dst, Operand(src1, src2, times_1, 0));
   1567   } else {
   1568     addq(dst, src2);
   1569     Assert(no_overflow, "Smi addition overflow");
   1570   }
   1571 }
   1572 
   1573 
   1574 void MacroAssembler::SmiSub(Register dst,
   1575                             Register src1,
   1576                             Register src2,
   1577                             Label* on_not_smi_result,
   1578                             Label::Distance near_jump) {
   1579   ASSERT_NOT_NULL(on_not_smi_result);
   1580   ASSERT(!dst.is(src2));
   1581   if (dst.is(src1)) {
   1582     cmpq(dst, src2);
   1583     j(overflow, on_not_smi_result, near_jump);
   1584     subq(dst, src2);
   1585   } else {
   1586     movq(dst, src1);
   1587     subq(dst, src2);
   1588     j(overflow, on_not_smi_result, near_jump);
   1589   }
   1590 }
   1591 
   1592 
   1593 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
   1594   // No overflow checking. Use only when it's known that
   1595   // overflowing is impossible (e.g., subtracting two positive smis).
   1596   ASSERT(!dst.is(src2));
   1597   if (!dst.is(src1)) {
   1598     movq(dst, src1);
   1599   }
   1600   subq(dst, src2);
   1601   Assert(no_overflow, "Smi subtraction overflow");
   1602 }
   1603 
   1604 
   1605 void MacroAssembler::SmiSub(Register dst,
   1606                             Register src1,
   1607                             const Operand& src2,
   1608                             Label* on_not_smi_result,
   1609                             Label::Distance near_jump) {
   1610   ASSERT_NOT_NULL(on_not_smi_result);
   1611   if (dst.is(src1)) {
   1612     movq(kScratchRegister, src2);
   1613     cmpq(src1, kScratchRegister);
   1614     j(overflow, on_not_smi_result, near_jump);
   1615     subq(src1, kScratchRegister);
   1616   } else {
   1617     movq(dst, src1);
   1618     subq(dst, src2);
   1619     j(overflow, on_not_smi_result, near_jump);
   1620   }
   1621 }
   1622 
   1623 
   1624 void MacroAssembler::SmiSub(Register dst,
   1625                             Register src1,
   1626                             const Operand& src2) {
   1627   // No overflow checking. Use only when it's known that
   1628   // overflowing is impossible (e.g., subtracting two positive smis).
   1629   if (!dst.is(src1)) {
   1630     movq(dst, src1);
   1631   }
   1632   subq(dst, src2);
   1633   Assert(no_overflow, "Smi subtraction overflow");
   1634 }
   1635 
   1636 
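         // Multiplication needs a negative-zero check on top of the overflow
         // check: if the product is 0 but exactly one operand is negative
         // (i.e. the xor of the operands has its sign bit set), the JavaScript
         // result is -0, which is not representable as a smi and must be
         // produced by the slow path as a heap number.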
   1637 void MacroAssembler::SmiMul(Register dst,
   1638                             Register src1,
   1639                             Register src2,
   1640                             Label* on_not_smi_result,
   1641                             Label::Distance near_jump) {
   1642   ASSERT(!dst.is(src2));
   1643   ASSERT(!dst.is(kScratchRegister));
   1644   ASSERT(!src1.is(kScratchRegister));
   1645   ASSERT(!src2.is(kScratchRegister));
   1646 
   1647   if (dst.is(src1)) {
   1648     Label failure, zero_correct_result;
   1649     movq(kScratchRegister, src1);  // Create backup for later testing.
   1650     SmiToInteger64(dst, src1);
   1651     imul(dst, src2);
   1652     j(overflow, &failure, Label::kNear);
   1653 
   1654     // Check for negative zero result.  If product is zero, and one
   1655     // argument is negative, go to slow case.
   1656     Label correct_result;
   1657     testq(dst, dst);
   1658     j(not_zero, &correct_result, Label::kNear);
   1659 
   1660     movq(dst, kScratchRegister);
   1661     xor_(dst, src2);
   1662     // Result was positive zero.
   1663     j(positive, &zero_correct_result, Label::kNear);
   1664 
   1665     bind(&failure);  // Reused failure exit, restores src1.
   1666     movq(src1, kScratchRegister);
   1667     jmp(on_not_smi_result, near_jump);
   1668 
   1669     bind(&zero_correct_result);
   1670     Set(dst, 0);
   1671 
   1672     bind(&correct_result);
   1673   } else {
   1674     SmiToInteger64(dst, src1);
   1675     imul(dst, src2);
   1676     j(overflow, on_not_smi_result, near_jump);
   1677     // Check for negative zero result.  If product is zero, and one
   1678     // argument is negative, go to slow case.
   1679     Label correct_result;
   1680     testq(dst, dst);
   1681     j(not_zero, &correct_result, Label::kNear);
    1682     // One of src1 and src2 is zero; check whether the other one is
    1683     // negative.
   1684     movq(kScratchRegister, src1);
   1685     xor_(kScratchRegister, src2);
   1686     j(negative, on_not_smi_result, near_jump);
   1687     bind(&correct_result);
   1688   }
   1689 }
   1690 
   1691 
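         // Division bails out in three cases: a zero divisor (the result is
         // +/-Infinity), Smi::kMinValue / -1 (idiv would raise an exception on
         // the overflow), and a negative-zero result (zero divided by a
         // negative number). The code below folds the last two into one test
         // on the untagged dividend and the divisor's sign.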
   1692 void MacroAssembler::SmiDiv(Register dst,
   1693                             Register src1,
   1694                             Register src2,
   1695                             Label* on_not_smi_result,
   1696                             Label::Distance near_jump) {
   1697   ASSERT(!src1.is(kScratchRegister));
   1698   ASSERT(!src2.is(kScratchRegister));
   1699   ASSERT(!dst.is(kScratchRegister));
   1700   ASSERT(!src2.is(rax));
   1701   ASSERT(!src2.is(rdx));
   1702   ASSERT(!src1.is(rdx));
   1703 
   1704   // Check for 0 divisor (result is +/-Infinity).
   1705   testq(src2, src2);
   1706   j(zero, on_not_smi_result, near_jump);
   1707 
   1708   if (src1.is(rax)) {
   1709     movq(kScratchRegister, src1);
   1710   }
   1711   SmiToInteger32(rax, src1);
   1712   // We need to rule out dividing Smi::kMinValue by -1, since that would
   1713   // overflow in idiv and raise an exception.
    1714   // We combine this with the negative-zero test (negative zero only happens
   1715   // when dividing zero by a negative number).
   1716 
   1717   // We overshoot a little and go to slow case if we divide min-value
   1718   // by any negative value, not just -1.
   1719   Label safe_div;
   1720   testl(rax, Immediate(0x7fffffff));
   1721   j(not_zero, &safe_div, Label::kNear);
   1722   testq(src2, src2);
   1723   if (src1.is(rax)) {
   1724     j(positive, &safe_div, Label::kNear);
   1725     movq(src1, kScratchRegister);
   1726     jmp(on_not_smi_result, near_jump);
   1727   } else {
   1728     j(negative, on_not_smi_result, near_jump);
   1729   }
   1730   bind(&safe_div);
   1731 
   1732   SmiToInteger32(src2, src2);
   1733   // Sign extend src1 into edx:eax.
   1734   cdq();
   1735   idivl(src2);
   1736   Integer32ToSmi(src2, src2);
   1737   // Check that the remainder is zero.
   1738   testl(rdx, rdx);
   1739   if (src1.is(rax)) {
   1740     Label smi_result;
   1741     j(zero, &smi_result, Label::kNear);
   1742     movq(src1, kScratchRegister);
   1743     jmp(on_not_smi_result, near_jump);
   1744     bind(&smi_result);
   1745   } else {
   1746     j(not_zero, on_not_smi_result, near_jump);
   1747   }
   1748   if (!dst.is(src1) && src1.is(rax)) {
   1749     movq(src1, kScratchRegister);
   1750   }
   1751   Integer32ToSmi(dst, rax);
   1752 }
   1753 
   1754 
   1755 void MacroAssembler::SmiMod(Register dst,
   1756                             Register src1,
   1757                             Register src2,
   1758                             Label* on_not_smi_result,
   1759                             Label::Distance near_jump) {
   1760   ASSERT(!dst.is(kScratchRegister));
   1761   ASSERT(!src1.is(kScratchRegister));
   1762   ASSERT(!src2.is(kScratchRegister));
   1763   ASSERT(!src2.is(rax));
   1764   ASSERT(!src2.is(rdx));
   1765   ASSERT(!src1.is(rdx));
   1766   ASSERT(!src1.is(src2));
   1767 
   1768   testq(src2, src2);
   1769   j(zero, on_not_smi_result, near_jump);
   1770 
   1771   if (src1.is(rax)) {
   1772     movq(kScratchRegister, src1);
   1773   }
   1774   SmiToInteger32(rax, src1);
   1775   SmiToInteger32(src2, src2);
   1776 
   1777   // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
   1778   Label safe_div;
   1779   cmpl(rax, Immediate(Smi::kMinValue));
   1780   j(not_equal, &safe_div, Label::kNear);
   1781   cmpl(src2, Immediate(-1));
   1782   j(not_equal, &safe_div, Label::kNear);
    1783   // Retag inputs and go to the slow case.
   1784   Integer32ToSmi(src2, src2);
   1785   if (src1.is(rax)) {
   1786     movq(src1, kScratchRegister);
   1787   }
   1788   jmp(on_not_smi_result, near_jump);
   1789   bind(&safe_div);
   1790 
   1791   // Sign extend eax into edx:eax.
   1792   cdq();
   1793   idivl(src2);
   1794   // Restore smi tags on inputs.
   1795   Integer32ToSmi(src2, src2);
   1796   if (src1.is(rax)) {
   1797     movq(src1, kScratchRegister);
   1798   }
   1799   // Check for a negative zero result.  If the result is zero, and the
   1800   // dividend is negative, go slow to return a floating point negative zero.
   1801   Label smi_result;
   1802   testl(rdx, rdx);
   1803   j(not_zero, &smi_result, Label::kNear);
   1804   testq(src1, src1);
   1805   j(negative, on_not_smi_result, near_jump);
   1806   bind(&smi_result);
   1807   Integer32ToSmi(dst, rdx);
   1808 }
   1809 
   1810 
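         // A tagged smi has its low 32 bits (tag and padding) clear. Setting
         // those bits to 1 first (via xor, or via lea with 0xFFFFFFFF, which
         // cannot carry into the value half) makes the following not_ clear
         // them again, yielding Smi::FromInt(~value) without untagging and
         // retagging.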
   1811 void MacroAssembler::SmiNot(Register dst, Register src) {
   1812   ASSERT(!dst.is(kScratchRegister));
   1813   ASSERT(!src.is(kScratchRegister));
   1814   // Set tag and padding bits before negating, so that they are zero afterwards.
   1815   movl(kScratchRegister, Immediate(~0));
   1816   if (dst.is(src)) {
   1817     xor_(dst, kScratchRegister);
   1818   } else {
   1819     lea(dst, Operand(src, kScratchRegister, times_1, 0));
   1820   }
   1821   not_(dst);
   1822 }
   1823 
   1824 
   1825 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
   1826   ASSERT(!dst.is(src2));
   1827   if (!dst.is(src1)) {
   1828     movq(dst, src1);
   1829   }
   1830   and_(dst, src2);
   1831 }
   1832 
   1833 
   1834 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   1835   if (constant->value() == 0) {
   1836     Set(dst, 0);
   1837   } else if (dst.is(src)) {
   1838     ASSERT(!dst.is(kScratchRegister));
   1839     Register constant_reg = GetSmiConstant(constant);
   1840     and_(dst, constant_reg);
   1841   } else {
   1842     LoadSmiConstant(dst, constant);
   1843     and_(dst, src);
   1844   }
   1845 }
   1846 
   1847 
   1848 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   1849   if (!dst.is(src1)) {
   1850     ASSERT(!src1.is(src2));
   1851     movq(dst, src1);
   1852   }
   1853   or_(dst, src2);
   1854 }
   1855 
   1856 
   1857 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   1858   if (dst.is(src)) {
   1859     ASSERT(!dst.is(kScratchRegister));
   1860     Register constant_reg = GetSmiConstant(constant);
   1861     or_(dst, constant_reg);
   1862   } else {
   1863     LoadSmiConstant(dst, constant);
   1864     or_(dst, src);
   1865   }
   1866 }
   1867 
   1868 
   1869 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   1870   if (!dst.is(src1)) {
   1871     ASSERT(!src1.is(src2));
   1872     movq(dst, src1);
   1873   }
   1874   xor_(dst, src2);
   1875 }
   1876 
   1877 
   1878 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   1879   if (dst.is(src)) {
   1880     ASSERT(!dst.is(kScratchRegister));
   1881     Register constant_reg = GetSmiConstant(constant);
   1882     xor_(dst, constant_reg);
   1883   } else {
   1884     LoadSmiConstant(dst, constant);
   1885     xor_(dst, src);
   1886   }
   1887 }
   1888 
   1889 
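         // For the constant arithmetic shift below, a sar by
         // shift_value + kSmiShift untags and shifts at once, and the shl
         // retags. No bailout is needed: an arithmetic right shift of a valid
         // smi always yields a valid smi.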
   1890 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
   1891                                                      Register src,
   1892                                                      int shift_value) {
   1893   ASSERT(is_uint5(shift_value));
   1894   if (shift_value > 0) {
   1895     if (dst.is(src)) {
   1896       sar(dst, Immediate(shift_value + kSmiShift));
   1897       shl(dst, Immediate(kSmiShift));
   1898     } else {
   1899       UNIMPLEMENTED();  // Not used.
   1900     }
   1901   }
   1902 }
   1903 
   1904 
   1905 void MacroAssembler::SmiShiftLeftConstant(Register dst,
   1906                                           Register src,
   1907                                           int shift_value) {
   1908   if (!dst.is(src)) {
   1909     movq(dst, src);
   1910   }
   1911   if (shift_value > 0) {
   1912     shl(dst, Immediate(shift_value));
   1913   }
   1914 }
   1915 
   1916 
   1917 void MacroAssembler::SmiShiftLogicalRightConstant(
   1918     Register dst, Register src, int shift_value,
   1919     Label* on_not_smi_result, Label::Distance near_jump) {
    1920   // Logical right shift interprets its result as an *unsigned* number.
   1921   if (dst.is(src)) {
   1922     UNIMPLEMENTED();  // Not used.
   1923   } else {
   1924     movq(dst, src);
   1925     if (shift_value == 0) {
   1926       testq(dst, dst);
   1927       j(negative, on_not_smi_result, near_jump);
   1928     }
   1929     shr(dst, Immediate(shift_value + kSmiShift));
   1930     shl(dst, Immediate(kSmiShift));
   1931   }
   1932 }
   1933 
   1934 
   1935 void MacroAssembler::SmiShiftLeft(Register dst,
   1936                                   Register src1,
   1937                                   Register src2) {
   1938   ASSERT(!dst.is(rcx));
   1939   // Untag shift amount.
   1940   if (!dst.is(src1)) {
   1941     movq(dst, src1);
   1942   }
   1943   SmiToInteger32(rcx, src2);
    1944   // Shift amount is specified by the lower 5 bits, not six as in the shl opcode.
   1945   and_(rcx, Immediate(0x1f));
   1946   shl_cl(dst);
   1947 }
   1948 
   1949 
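         // Below, or'ing kSmiShift (32) into the 5-bit shift amount lets a
         // single shr both untag and shift, and the shl retags. The result is
         // interpreted as unsigned, so if the retagged value is negative
         // (bit 31 of the shifted value set, or a zero shift of a negative
         // smi), it does not fit in a smi and control goes to the slow path.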
   1950 void MacroAssembler::SmiShiftLogicalRight(Register dst,
   1951                                           Register src1,
   1952                                           Register src2,
   1953                                           Label* on_not_smi_result,
   1954                                           Label::Distance near_jump) {
   1955   ASSERT(!dst.is(kScratchRegister));
   1956   ASSERT(!src1.is(kScratchRegister));
   1957   ASSERT(!src2.is(kScratchRegister));
   1958   ASSERT(!dst.is(rcx));
   1959   // dst and src1 can be the same, because the one case that bails out
   1960   // is a shift by 0, which leaves dst, and therefore src1, unchanged.
   1961   if (src1.is(rcx) || src2.is(rcx)) {
   1962     movq(kScratchRegister, rcx);
   1963   }
   1964   if (!dst.is(src1)) {
   1965     movq(dst, src1);
   1966   }
   1967   SmiToInteger32(rcx, src2);
   1968   orl(rcx, Immediate(kSmiShift));
    1969   shr_cl(dst);  // Shift amount is (original rcx & 0x1f) + 32.
   1970   shl(dst, Immediate(kSmiShift));
   1971   testq(dst, dst);
   1972   if (src1.is(rcx) || src2.is(rcx)) {
   1973     Label positive_result;
   1974     j(positive, &positive_result, Label::kNear);
   1975     if (src1.is(rcx)) {
   1976       movq(src1, kScratchRegister);
   1977     } else {
   1978       movq(src2, kScratchRegister);
   1979     }
   1980     jmp(on_not_smi_result, near_jump);
   1981     bind(&positive_result);
   1982   } else {
   1983     // src2 was zero and src1 negative.
   1984     j(negative, on_not_smi_result, near_jump);
   1985   }
   1986 }
   1987 
   1988 
   1989 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
   1990                                              Register src1,
   1991                                              Register src2) {
   1992   ASSERT(!dst.is(kScratchRegister));
   1993   ASSERT(!src1.is(kScratchRegister));
   1994   ASSERT(!src2.is(kScratchRegister));
   1995   ASSERT(!dst.is(rcx));
   1996   if (src1.is(rcx)) {
   1997     movq(kScratchRegister, src1);
   1998   } else if (src2.is(rcx)) {
   1999     movq(kScratchRegister, src2);
   2000   }
   2001   if (!dst.is(src1)) {
   2002     movq(dst, src1);
   2003   }
   2004   SmiToInteger32(rcx, src2);
   2005   orl(rcx, Immediate(kSmiShift));
    2006   sar_cl(dst);  // Shift amount is (original rcx & 0x1f) + 32.
   2007   shl(dst, Immediate(kSmiShift));
   2008   if (src1.is(rcx)) {
   2009     movq(src1, kScratchRegister);
   2010   } else if (src2.is(rcx)) {
   2011     movq(src2, kScratchRegister);
   2012   }
   2013 }
   2014 
   2015 
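         // Branch-free select: with mask = (src1 & kSmiTagMask) - 1, which is
         // all 1s when src1 is a smi and all 0s otherwise, the expression
         // src1 ^ ((src1 ^ src2) & mask) evaluates to src2 when src1 is the
         // smi and to src1 otherwise, i.e. always to the non-smi operand.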
   2016 void MacroAssembler::SelectNonSmi(Register dst,
   2017                                   Register src1,
   2018                                   Register src2,
   2019                                   Label* on_not_smis,
   2020                                   Label::Distance near_jump) {
   2021   ASSERT(!dst.is(kScratchRegister));
   2022   ASSERT(!src1.is(kScratchRegister));
   2023   ASSERT(!src2.is(kScratchRegister));
   2024   ASSERT(!dst.is(src1));
   2025   ASSERT(!dst.is(src2));
    2026   // The operands must not both be smis.
   2027 #ifdef DEBUG
   2028   if (allow_stub_calls()) {  // Check contains a stub call.
   2029     Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
   2030     Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
   2031   }
   2032 #endif
   2033   STATIC_ASSERT(kSmiTag == 0);
   2034   ASSERT_EQ(0, Smi::FromInt(0));
   2035   movl(kScratchRegister, Immediate(kSmiTagMask));
   2036   and_(kScratchRegister, src1);
   2037   testl(kScratchRegister, src2);
    2038   // If non-zero, then neither operand is a smi.
   2039   j(not_zero, on_not_smis, near_jump);
   2040 
   2041   // Exactly one operand is a smi.
   2042   ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
    2043   // kScratchRegister still holds src1 & kSmiTagMask, which is zero or one.
   2044   subq(kScratchRegister, Immediate(1));
    2045   // If src1 is a smi, the scratch register is now all 1s, else all 0s.
   2046   movq(dst, src1);
   2047   xor_(dst, src2);
   2048   and_(dst, kScratchRegister);
   2049   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   2050   xor_(dst, src1);
   2051   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
   2052 }
   2053 
   2054 
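         // A smi already holds value << kSmiShift, so scaling an index by
         // 2^shift takes a single shift by |kSmiShift - shift| in the
         // appropriate direction, e.g. a sar by 29 for
         // shift == kPointerSizeLog2 == 3.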
   2055 SmiIndex MacroAssembler::SmiToIndex(Register dst,
   2056                                     Register src,
   2057                                     int shift) {
   2058   ASSERT(is_uint6(shift));
   2059   // There is a possible optimization if shift is in the range 60-63, but that
   2060   // will (and must) never happen.
   2061   if (!dst.is(src)) {
   2062     movq(dst, src);
   2063   }
   2064   if (shift < kSmiShift) {
   2065     sar(dst, Immediate(kSmiShift - shift));
   2066   } else {
   2067     shl(dst, Immediate(shift - kSmiShift));
   2068   }
   2069   return SmiIndex(dst, times_1);
   2070 }
   2071 
   2072 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   2073                                             Register src,
   2074                                             int shift) {
   2075   // Register src holds a positive smi.
   2076   ASSERT(is_uint6(shift));
   2077   if (!dst.is(src)) {
   2078     movq(dst, src);
   2079   }
   2080   neg(dst);
   2081   if (shift < kSmiShift) {
   2082     sar(dst, Immediate(kSmiShift - shift));
   2083   } else {
   2084     shl(dst, Immediate(shift - kSmiShift));
   2085   }
   2086   return SmiIndex(dst, times_1);
   2087 }
   2088 
   2089 
   2090 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
   2091   ASSERT_EQ(0, kSmiShift % kBitsPerByte);
   2092   addl(dst, Operand(src, kSmiShift / kBitsPerByte));
   2093 }
   2094 
   2095 
   2096 void MacroAssembler::JumpIfNotString(Register object,
   2097                                      Register object_map,
   2098                                      Label* not_string,
   2099                                      Label::Distance near_jump) {
   2100   Condition is_smi = CheckSmi(object);
   2101   j(is_smi, not_string, near_jump);
   2102   CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
   2103   j(above_equal, not_string, near_jump);
   2104 }
   2105 
   2106 
   2107 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
   2108     Register first_object,
   2109     Register second_object,
   2110     Register scratch1,
   2111     Register scratch2,
   2112     Label* on_fail,
   2113     Label::Distance near_jump) {
   2114   // Check that both objects are not smis.
   2115   Condition either_smi = CheckEitherSmi(first_object, second_object);
   2116   j(either_smi, on_fail, near_jump);
   2117 
   2118   // Load instance type for both strings.
   2119   movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
   2120   movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
   2121   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   2122   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
   2123 
   2124   // Check that both are flat ASCII strings.
   2125   ASSERT(kNotStringTag != 0);
   2126   const int kFlatAsciiStringMask =
   2127       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2128   const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
   2129 
   2130   andl(scratch1, Immediate(kFlatAsciiStringMask));
   2131   andl(scratch2, Immediate(kFlatAsciiStringMask));
   2132   // Interleave the bits to check both scratch1 and scratch2 in one test.
   2133   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
   2134   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
   2135   cmpl(scratch1,
   2136        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   2137   j(not_equal, on_fail, near_jump);
   2138 }
   2139 
   2140 
   2141 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
   2142     Register instance_type,
   2143     Register scratch,
   2144     Label* failure,
   2145     Label::Distance near_jump) {
   2146   if (!scratch.is(instance_type)) {
   2147     movl(scratch, instance_type);
   2148   }
   2149 
   2150   const int kFlatAsciiStringMask =
   2151       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2152 
   2153   andl(scratch, Immediate(kFlatAsciiStringMask));
   2154   cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
   2155   j(not_equal, failure, near_jump);
   2156 }
   2157 
   2158 
   2159 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
   2160     Register first_object_instance_type,
   2161     Register second_object_instance_type,
   2162     Register scratch1,
   2163     Register scratch2,
   2164     Label* on_fail,
   2165     Label::Distance near_jump) {
   2166   // Load instance type for both strings.
   2167   movq(scratch1, first_object_instance_type);
   2168   movq(scratch2, second_object_instance_type);
   2169 
   2170   // Check that both are flat ASCII strings.
   2171   ASSERT(kNotStringTag != 0);
   2172   const int kFlatAsciiStringMask =
   2173       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2174   const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
   2175 
   2176   andl(scratch1, Immediate(kFlatAsciiStringMask));
   2177   andl(scratch2, Immediate(kFlatAsciiStringMask));
   2178   // Interleave the bits to check both scratch1 and scratch2 in one test.
   2179   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
   2180   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
   2181   cmpl(scratch1,
   2182        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   2183   j(not_equal, on_fail, near_jump);
   2184 }
   2185 
   2186 
   2187 
   2188 void MacroAssembler::Move(Register dst, Register src) {
   2189   if (!dst.is(src)) {
   2190     movq(dst, src);
   2191   }
   2192 }
   2193 
   2194 
   2195 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   2196   ASSERT(!source->IsFailure());
   2197   if (source->IsSmi()) {
   2198     Move(dst, Smi::cast(*source));
   2199   } else {
   2200     movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
   2201   }
   2202 }
   2203 
   2204 
   2205 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
   2206   ASSERT(!source->IsFailure());
   2207   if (source->IsSmi()) {
   2208     Move(dst, Smi::cast(*source));
   2209   } else {
   2210     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2211     movq(dst, kScratchRegister);
   2212   }
   2213 }
   2214 
   2215 
   2216 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
   2217   if (source->IsSmi()) {
   2218     Cmp(dst, Smi::cast(*source));
   2219   } else {
   2220     Move(kScratchRegister, source);
   2221     cmpq(dst, kScratchRegister);
   2222   }
   2223 }
   2224 
   2225 
   2226 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   2227   if (source->IsSmi()) {
   2228     Cmp(dst, Smi::cast(*source));
   2229   } else {
   2230     ASSERT(source->IsHeapObject());
   2231     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2232     cmpq(dst, kScratchRegister);
   2233   }
   2234 }
   2235 
   2236 
   2237 void MacroAssembler::Push(Handle<Object> source) {
   2238   if (source->IsSmi()) {
   2239     Push(Smi::cast(*source));
   2240   } else {
   2241     ASSERT(source->IsHeapObject());
   2242     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
   2243     push(kScratchRegister);
   2244   }
   2245 }
   2246 
   2247 
   2248 void MacroAssembler::LoadHeapObject(Register result,
   2249                                     Handle<HeapObject> object) {
   2250   if (isolate()->heap()->InNewSpace(*object)) {
   2251     Handle<JSGlobalPropertyCell> cell =
   2252         isolate()->factory()->NewJSGlobalPropertyCell(object);
   2253     movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
   2254     movq(result, Operand(result, 0));
   2255   } else {
   2256     Move(result, object);
   2257   }
   2258 }
   2259 
   2260 
   2261 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
   2262   if (isolate()->heap()->InNewSpace(*object)) {
   2263     Handle<JSGlobalPropertyCell> cell =
   2264         isolate()->factory()->NewJSGlobalPropertyCell(object);
   2265     movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
   2266     movq(kScratchRegister, Operand(kScratchRegister, 0));
   2267     push(kScratchRegister);
   2268   } else {
   2269     Push(object);
   2270   }
   2271 }
   2272 
   2273 
   2274 void MacroAssembler::LoadGlobalCell(Register dst,
   2275                                     Handle<JSGlobalPropertyCell> cell) {
   2276   if (dst.is(rax)) {
   2277     load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
   2278   } else {
   2279     movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
   2280     movq(dst, Operand(dst, 0));
   2281   }
   2282 }
   2283 
   2284 
   2285 void MacroAssembler::Push(Smi* source) {
   2286   intptr_t smi = reinterpret_cast<intptr_t>(source);
   2287   if (is_int32(smi)) {
   2288     push(Immediate(static_cast<int32_t>(smi)));
   2289   } else {
   2290     Register constant = GetSmiConstant(source);
   2291     push(constant);
   2292   }
   2293 }
   2294 
   2295 
   2296 void MacroAssembler::Drop(int stack_elements) {
   2297   if (stack_elements > 0) {
   2298     addq(rsp, Immediate(stack_elements * kPointerSize));
   2299   }
   2300 }
   2301 
   2302 
   2303 void MacroAssembler::Test(const Operand& src, Smi* source) {
   2304   testl(Operand(src, kIntSize), Immediate(source->value()));
   2305 }
   2306 
   2307 
   2308 void MacroAssembler::TestBit(const Operand& src, int bits) {
   2309   int byte_offset = bits / kBitsPerByte;
   2310   int bit_in_byte = bits & (kBitsPerByte - 1);
   2311   testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
   2312 }
   2313 
   2314 
   2315 void MacroAssembler::Jump(ExternalReference ext) {
   2316   LoadAddress(kScratchRegister, ext);
   2317   jmp(kScratchRegister);
   2318 }
   2319 
   2320 
   2321 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
   2322   movq(kScratchRegister, destination, rmode);
   2323   jmp(kScratchRegister);
   2324 }
   2325 
   2326 
   2327 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   2328   // TODO(X64): Inline this
   2329   jmp(code_object, rmode);
   2330 }
   2331 
   2332 
   2333 int MacroAssembler::CallSize(ExternalReference ext) {
    2334   // Opcode for call kScratchRegister (r10) is: Rex.B FF D2 (three bytes).
   2335   const int kCallInstructionSize = 3;
   2336   return LoadAddressSize(ext) + kCallInstructionSize;
   2337 }
   2338 
   2339 
   2340 void MacroAssembler::Call(ExternalReference ext) {
   2341 #ifdef DEBUG
   2342   int end_position = pc_offset() + CallSize(ext);
   2343 #endif
   2344   LoadAddress(kScratchRegister, ext);
   2345   call(kScratchRegister);
   2346 #ifdef DEBUG
   2347   CHECK_EQ(end_position, pc_offset());
   2348 #endif
   2349 }
   2350 
   2351 
   2352 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
   2353 #ifdef DEBUG
   2354   int end_position = pc_offset() + CallSize(destination, rmode);
   2355 #endif
   2356   movq(kScratchRegister, destination, rmode);
   2357   call(kScratchRegister);
   2358 #ifdef DEBUG
   2359   CHECK_EQ(pc_offset(), end_position);
   2360 #endif
   2361 }
   2362 
   2363 
   2364 void MacroAssembler::Call(Handle<Code> code_object,
   2365                           RelocInfo::Mode rmode,
   2366                           unsigned ast_id) {
   2367 #ifdef DEBUG
   2368   int end_position = pc_offset() + CallSize(code_object);
   2369 #endif
   2370   ASSERT(RelocInfo::IsCodeTarget(rmode));
   2371   call(code_object, rmode, ast_id);
   2372 #ifdef DEBUG
   2373   CHECK_EQ(end_position, pc_offset());
   2374 #endif
   2375 }
   2376 
   2377 
   2378 void MacroAssembler::Pushad() {
   2379   push(rax);
   2380   push(rcx);
   2381   push(rdx);
   2382   push(rbx);
   2383   // Not pushing rsp or rbp.
   2384   push(rsi);
   2385   push(rdi);
   2386   push(r8);
   2387   push(r9);
   2388   // r10 is kScratchRegister.
   2389   push(r11);
   2390   // r12 is kSmiConstantRegister.
   2391   // r13 is kRootRegister.
   2392   push(r14);
   2393   push(r15);
   2394   STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
   2395   // Use lea for symmetry with Popad.
   2396   int sp_delta =
   2397       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
   2398   lea(rsp, Operand(rsp, -sp_delta));
   2399 }
   2400 
   2401 
   2402 void MacroAssembler::Popad() {
   2403   // Popad must not change the flags, so use lea instead of addq.
   2404   int sp_delta =
   2405       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
   2406   lea(rsp, Operand(rsp, sp_delta));
   2407   pop(r15);
   2408   pop(r14);
   2409   pop(r11);
   2410   pop(r9);
   2411   pop(r8);
   2412   pop(rdi);
   2413   pop(rsi);
   2414   pop(rbx);
   2415   pop(rdx);
   2416   pop(rcx);
   2417   pop(rax);
   2418 }
   2419 
   2420 
   2421 void MacroAssembler::Dropad() {
   2422   addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
   2423 }
   2424 
   2425 
    2426 // Order in which general registers are pushed by Pushad:
   2427 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
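         // Maps a register code to its slot index in the frame built by Pushad;
         // -1 marks the registers that are not saved (rsp, rbp, r10, r12, r13).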
   2428 const int
   2429 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
   2430     0,
   2431     1,
   2432     2,
   2433     3,
   2434     -1,
   2435     -1,
   2436     4,
   2437     5,
   2438     6,
   2439     7,
   2440     -1,
   2441     8,
   2442     -1,
   2443     -1,
   2444     9,
   2445     10
   2446 };
   2447 
   2448 
   2449 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
   2450   movq(SafepointRegisterSlot(dst), src);
   2451 }
   2452 
   2453 
   2454 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
   2455   movq(dst, SafepointRegisterSlot(src));
   2456 }
   2457 
   2458 
   2459 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
   2460   return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
   2461 }
   2462 
   2463 
   2464 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
   2465                                     int handler_index) {
   2466   // Adjust this code if not the case.
   2467   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   2468   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2469   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   2470   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   2471   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   2472   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   2473 
   2474   // We will build up the handler from the bottom by pushing on the stack.
   2475   // First push the frame pointer and context.
   2476   if (kind == StackHandler::JS_ENTRY) {
   2477     // The frame pointer does not point to a JS frame so we save NULL for
   2478     // rbp. We expect the code throwing an exception to check rbp before
   2479     // dereferencing it to restore the context.
   2480     push(Immediate(0));  // NULL frame pointer.
   2481     Push(Smi::FromInt(0));  // No context.
   2482   } else {
   2483     push(rbp);
   2484     push(rsi);
   2485   }
   2486 
   2487   // Push the state and the code object.
   2488   unsigned state =
   2489       StackHandler::IndexField::encode(handler_index) |
   2490       StackHandler::KindField::encode(kind);
   2491   push(Immediate(state));
   2492   Push(CodeObject());
   2493 
   2494   // Link the current handler as the next handler.
   2495   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2496   push(ExternalOperand(handler_address));
   2497   // Set this new handler as the current one.
   2498   movq(ExternalOperand(handler_address), rsp);
   2499 }
   2500 
   2501 
   2502 void MacroAssembler::PopTryHandler() {
   2503   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2504   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2505   pop(ExternalOperand(handler_address));
   2506   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
   2507 }
   2508 
   2509 
   2510 void MacroAssembler::JumpToHandlerEntry() {
   2511   // Compute the handler entry address and jump to it.  The handler table is
   2512   // a fixed array of (smi-tagged) code offsets.
   2513   // rax = exception, rdi = code object, rdx = state.
   2514   movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
   2515   shr(rdx, Immediate(StackHandler::kKindWidth));
   2516   movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
   2517   SmiToInteger64(rdx, rdx);
   2518   lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
   2519   jmp(rdi);
   2520 }
   2521 
   2522 
   2523 void MacroAssembler::Throw(Register value) {
   2524   // Adjust this code if not the case.
   2525   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   2526   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2527   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   2528   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   2529   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   2530   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   2531 
   2532   // The exception is expected in rax.
   2533   if (!value.is(rax)) {
   2534     movq(rax, value);
   2535   }
   2536   // Drop the stack pointer to the top of the top handler.
   2537   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2538   movq(rsp, ExternalOperand(handler_address));
   2539   // Restore the next handler.
   2540   pop(ExternalOperand(handler_address));
   2541 
   2542   // Remove the code object and state, compute the handler address in rdi.
   2543   pop(rdi);  // Code object.
   2544   pop(rdx);  // Offset and state.
   2545 
   2546   // Restore the context and frame pointer.
   2547   pop(rsi);  // Context.
   2548   pop(rbp);  // Frame pointer.
   2549 
   2550   // If the handler is a JS frame, restore the context to the frame.
    2551   // (kind == JS_ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
   2552   // rbp or rsi.
   2553   Label skip;
   2554   testq(rsi, rsi);
   2555   j(zero, &skip, Label::kNear);
   2556   movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
   2557   bind(&skip);
   2558 
   2559   JumpToHandlerEntry();
   2560 }
   2561 
   2562 
   2563 void MacroAssembler::ThrowUncatchable(Register value) {
   2564   // Adjust this code if not the case.
   2565   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   2566   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   2567   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   2568   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   2569   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   2570   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   2571 
   2572   // The exception is expected in rax.
   2573   if (!value.is(rax)) {
   2574     movq(rax, value);
   2575   }
   2576   // Drop the stack pointer to the top of the top stack handler.
   2577   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   2578   Load(rsp, handler_address);
   2579 
   2580   // Unwind the handlers until the top ENTRY handler is found.
   2581   Label fetch_next, check_kind;
   2582   jmp(&check_kind, Label::kNear);
   2583   bind(&fetch_next);
   2584   movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
   2585 
   2586   bind(&check_kind);
   2587   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
   2588   testl(Operand(rsp, StackHandlerConstants::kStateOffset),
   2589         Immediate(StackHandler::KindField::kMask));
   2590   j(not_zero, &fetch_next);
   2591 
   2592   // Set the top handler address to next handler past the top ENTRY handler.
   2593   pop(ExternalOperand(handler_address));
   2594 
   2595   // Remove the code object and state, compute the handler address in rdi.
   2596   pop(rdi);  // Code object.
   2597   pop(rdx);  // Offset and state.
   2598 
   2599   // Clear the context pointer and frame pointer (0 was saved in the handler).
   2600   pop(rsi);
   2601   pop(rbp);
   2602 
   2603   JumpToHandlerEntry();
   2604 }
   2605 
   2606 
   2607 void MacroAssembler::Ret() {
   2608   ret(0);
   2609 }
   2610 
   2611 
   2612 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
   2613   if (is_uint16(bytes_dropped)) {
   2614     ret(bytes_dropped);
   2615   } else {
   2616     pop(scratch);
   2617     addq(rsp, Immediate(bytes_dropped));
   2618     push(scratch);
   2619     ret(0);
   2620   }
   2621 }
   2622 
   2623 
   2624 void MacroAssembler::FCmp() {
   2625   fucomip();
   2626   fstp(0);
   2627 }
   2628 
   2629 
   2630 void MacroAssembler::CmpObjectType(Register heap_object,
   2631                                    InstanceType type,
   2632                                    Register map) {
   2633   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   2634   CmpInstanceType(map, type);
   2635 }
   2636 
   2637 
   2638 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
   2639   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
   2640        Immediate(static_cast<int8_t>(type)));
   2641 }
   2642 
   2643 
   2644 void MacroAssembler::CheckFastElements(Register map,
   2645                                        Label* fail,
   2646                                        Label::Distance distance) {
   2647   STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   2648   STATIC_ASSERT(FAST_ELEMENTS == 1);
   2649   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2650        Immediate(Map::kMaximumBitField2FastElementValue));
   2651   j(above, fail, distance);
   2652 }
   2653 
   2654 
   2655 void MacroAssembler::CheckFastObjectElements(Register map,
   2656                                              Label* fail,
   2657                                              Label::Distance distance) {
   2658   STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   2659   STATIC_ASSERT(FAST_ELEMENTS == 1);
   2660   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2661        Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
   2662   j(below_equal, fail, distance);
   2663   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2664        Immediate(Map::kMaximumBitField2FastElementValue));
   2665   j(above, fail, distance);
   2666 }
   2667 
   2668 
   2669 void MacroAssembler::CheckFastSmiOnlyElements(Register map,
   2670                                               Label* fail,
   2671                                               Label::Distance distance) {
   2672   STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   2673   cmpb(FieldOperand(map, Map::kBitField2Offset),
   2674        Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
   2675   j(above, fail, distance);
   2676 }
   2677 
   2678 
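         // NaNs are canonicalized before being stored below because a
         // FixedDoubleArray reserves one NaN bit pattern to encode "the hole";
         // storing an arbitrary NaN unchanged could be misread as a missing
         // element.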
   2679 void MacroAssembler::StoreNumberToDoubleElements(
   2680     Register maybe_number,
   2681     Register elements,
   2682     Register index,
   2683     XMMRegister xmm_scratch,
   2684     Label* fail) {
   2685   Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
   2686 
   2687   JumpIfSmi(maybe_number, &smi_value, Label::kNear);
   2688 
   2689   CheckMap(maybe_number,
   2690            isolate()->factory()->heap_number_map(),
   2691            fail,
   2692            DONT_DO_SMI_CHECK);
   2693 
   2694   // Double value, canonicalize NaN.
   2695   uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
   2696   cmpl(FieldOperand(maybe_number, offset),
   2697        Immediate(kNaNOrInfinityLowerBoundUpper32));
   2698   j(greater_equal, &maybe_nan, Label::kNear);
   2699 
   2700   bind(&not_nan);
   2701   movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
   2702   bind(&have_double_value);
   2703   movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
   2704         xmm_scratch);
   2705   jmp(&done);
   2706 
   2707   bind(&maybe_nan);
   2708   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
   2709   // it's an Infinity, and the non-NaN code path applies.
   2710   j(greater, &is_nan, Label::kNear);
   2711   cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
   2712   j(zero, &not_nan);
   2713   bind(&is_nan);
   2714   // Convert all NaNs to the same canonical NaN value when they are stored in
   2715   // the double array.
   2716   Set(kScratchRegister, BitCast<uint64_t>(
   2717       FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
   2718   movq(xmm_scratch, kScratchRegister);
   2719   jmp(&have_double_value, Label::kNear);
   2720 
   2721   bind(&smi_value);
    2722   // Value is a smi. Convert to a double and store.
   2723   // Preserve original value.
   2724   SmiToInteger32(kScratchRegister, maybe_number);
   2725   cvtlsi2sd(xmm_scratch, kScratchRegister);
   2726   movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
   2727         xmm_scratch);
   2728   bind(&done);
   2729 }
   2730 
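         // With ALLOW_ELEMENT_TRANSITION_MAPS, the comparison below also
         // accepts maps that differ from the given one only by a more general
         // elements kind (FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS or
         // FAST_DOUBLE_ELEMENTS), jumping to early_success on any match.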
   2731 
   2732 void MacroAssembler::CompareMap(Register obj,
   2733                                 Handle<Map> map,
   2734                                 Label* early_success,
   2735                                 CompareMapMode mode) {
   2736   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   2737   if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
   2738     Map* transitioned_fast_element_map(
   2739         map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
   2740     ASSERT(transitioned_fast_element_map == NULL ||
   2741            map->elements_kind() != FAST_ELEMENTS);
   2742     if (transitioned_fast_element_map != NULL) {
   2743       j(equal, early_success, Label::kNear);
   2744       Cmp(FieldOperand(obj, HeapObject::kMapOffset),
   2745           Handle<Map>(transitioned_fast_element_map));
   2746     }
   2747 
   2748     Map* transitioned_double_map(
   2749         map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
   2750     ASSERT(transitioned_double_map == NULL ||
   2751            map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
   2752     if (transitioned_double_map != NULL) {
   2753       j(equal, early_success, Label::kNear);
   2754       Cmp(FieldOperand(obj, HeapObject::kMapOffset),
   2755           Handle<Map>(transitioned_double_map));
   2756     }
   2757   }
   2758 }
   2759 
   2760 
   2761 void MacroAssembler::CheckMap(Register obj,
   2762                               Handle<Map> map,
   2763                               Label* fail,
   2764                               SmiCheckType smi_check_type,
   2765                               CompareMapMode mode) {
   2766   if (smi_check_type == DO_SMI_CHECK) {
   2767     JumpIfSmi(obj, fail);
   2768   }
   2769 
   2770   Label success;
   2771   CompareMap(obj, map, &success, mode);
   2772   j(not_equal, fail);
   2773   bind(&success);
   2774 }
   2775 
   2776 
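         // Branchless clamp to [0, 255]: values already in range pass the test
         // and are done. Otherwise setcc yields 1 for a negative input and 0
         // for an input above 255, and decb turns that into 0 or 255
         // respectively, e.g. -7 becomes 0 and 300 becomes 255.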
   2777 void MacroAssembler::ClampUint8(Register reg) {
   2778   Label done;
   2779   testl(reg, Immediate(0xFFFFFF00));
   2780   j(zero, &done, Label::kNear);
   2781   setcc(negative, reg);  // 1 if negative, 0 if positive.
   2782   decb(reg);  // 0 if negative, 255 if positive.
   2783   bind(&done);
   2784 }
   2785 
   2786 
   2787 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
   2788                                         XMMRegister temp_xmm_reg,
   2789                                         Register result_reg,
   2790                                         Register temp_reg) {
   2791   Label done;
   2792   Set(result_reg, 0);
   2793   xorps(temp_xmm_reg, temp_xmm_reg);
   2794   ucomisd(input_reg, temp_xmm_reg);
   2795   j(below, &done, Label::kNear);
   2796   uint64_t one_half = BitCast<uint64_t, double>(0.5);
   2797   Set(temp_reg, one_half);
   2798   movq(temp_xmm_reg, temp_reg);
   2799   addsd(temp_xmm_reg, input_reg);
   2800   cvttsd2si(result_reg, temp_xmm_reg);
   2801   testl(result_reg, Immediate(0xFFFFFF00));
   2802   j(zero, &done, Label::kNear);
   2803   Set(result_reg, 255);
   2804   bind(&done);
   2805 }
   2806 
   2807 
   2808 void MacroAssembler::LoadInstanceDescriptors(Register map,
   2809                                              Register descriptors) {
   2810   movq(descriptors, FieldOperand(map,
   2811                                  Map::kInstanceDescriptorsOrBitField3Offset));
   2812   Label not_smi;
   2813   JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
   2814   Move(descriptors, isolate()->factory()->empty_descriptor_array());
   2815   bind(&not_smi);
   2816 }
   2817 
   2818 
   2819 void MacroAssembler::DispatchMap(Register obj,
   2820                                  Handle<Map> map,
   2821                                  Handle<Code> success,
   2822                                  SmiCheckType smi_check_type) {
   2823   Label fail;
   2824   if (smi_check_type == DO_SMI_CHECK) {
   2825     JumpIfSmi(obj, &fail);
   2826   }
   2827   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   2828   j(equal, success, RelocInfo::CODE_TARGET);
   2829 
   2830   bind(&fail);
   2831 }
   2832 
   2833 
   2834 void MacroAssembler::AbortIfNotNumber(Register object) {
   2835   Label ok;
   2836   Condition is_smi = CheckSmi(object);
   2837   j(is_smi, &ok, Label::kNear);
   2838   Cmp(FieldOperand(object, HeapObject::kMapOffset),
   2839       isolate()->factory()->heap_number_map());
   2840   Assert(equal, "Operand not a number");
   2841   bind(&ok);
   2842 }
   2843 
   2844 
   2845 void MacroAssembler::AbortIfSmi(Register object) {
   2846   Condition is_smi = CheckSmi(object);
   2847   Assert(NegateCondition(is_smi), "Operand is a smi");
   2848 }
   2849 
   2850 
   2851 void MacroAssembler::AbortIfNotSmi(Register object) {
   2852   Condition is_smi = CheckSmi(object);
   2853   Assert(is_smi, "Operand is not a smi");
   2854 }
   2855 
   2856 
   2857 void MacroAssembler::AbortIfNotSmi(const Operand& object) {
   2858   Condition is_smi = CheckSmi(object);
   2859   Assert(is_smi, "Operand is not a smi");
   2860 }
   2861 
   2862 
   2863 void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
   2864   ASSERT(!int32_register.is(kScratchRegister));
   2865   movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
   2866   cmpq(kScratchRegister, int32_register);
   2867   Assert(above_equal, "32 bit value in register is not zero-extended");
   2868 }
   2869 
   2870 
   2871 void MacroAssembler::AbortIfNotString(Register object) {
   2872   testb(object, Immediate(kSmiTagMask));
   2873   Assert(not_equal, "Operand is not a string");
   2874   push(object);
   2875   movq(object, FieldOperand(object, HeapObject::kMapOffset));
   2876   CmpInstanceType(object, FIRST_NONSTRING_TYPE);
   2877   pop(object);
   2878   Assert(below, "Operand is not a string");
   2879 }
   2880 
   2881 
   2882 void MacroAssembler::AbortIfNotRootValue(Register src,
   2883                                          Heap::RootListIndex root_value_index,
   2884                                          const char* message) {
   2885   ASSERT(!src.is(kScratchRegister));
   2886   LoadRoot(kScratchRegister, root_value_index);
   2887   cmpq(src, kScratchRegister);
   2888   Check(equal, message);
   2889 }
   2890 
   2891 
   2892 
   2893 Condition MacroAssembler::IsObjectStringType(Register heap_object,
   2894                                              Register map,
   2895                                              Register instance_type) {
   2896   movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   2897   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
   2898   STATIC_ASSERT(kNotStringTag != 0);
   2899   testb(instance_type, Immediate(kIsNotStringMask));
   2900   return zero;
   2901 }
   2902 
   2903 
   2904 void MacroAssembler::TryGetFunctionPrototype(Register function,
   2905                                              Register result,
   2906                                              Label* miss,
   2907                                              bool miss_on_bound_function) {
   2908   // Check that the receiver isn't a smi.
   2909   testl(function, Immediate(kSmiTagMask));
   2910   j(zero, miss);
   2911 
   2912   // Check that the function really is a function.
   2913   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   2914   j(not_equal, miss);
   2915 
   2916   if (miss_on_bound_function) {
   2917     movq(kScratchRegister,
   2918          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   2919     // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
   2920     // field).
   2921     TestBit(FieldOperand(kScratchRegister,
   2922                          SharedFunctionInfo::kCompilerHintsOffset),
   2923             SharedFunctionInfo::kBoundFunction);
   2924     j(not_zero, miss);
   2925   }
   2926 
   2927   // Make sure that the function has an instance prototype.
   2928   Label non_instance;
   2929   testb(FieldOperand(result, Map::kBitFieldOffset),
   2930         Immediate(1 << Map::kHasNonInstancePrototype));
   2931   j(not_zero, &non_instance, Label::kNear);
   2932 
   2933   // Get the prototype or initial map from the function.
   2934   movq(result,
   2935        FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2936 
   2937   // If the prototype or initial map is the hole, don't return it and
   2938   // simply miss the cache instead. This will allow us to allocate a
   2939   // prototype object on-demand in the runtime system.
   2940   CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2941   j(equal, miss);
   2942 
   2943   // If the function does not have an initial map, we're done.
   2944   Label done;
   2945   CmpObjectType(result, MAP_TYPE, kScratchRegister);
   2946   j(not_equal, &done, Label::kNear);
   2947 
   2948   // Get the prototype from the initial map.
   2949   movq(result, FieldOperand(result, Map::kPrototypeOffset));
   2950   jmp(&done, Label::kNear);
   2951 
   2952   // Non-instance prototype: Fetch prototype from constructor field
   2953   // in initial map.
   2954   bind(&non_instance);
   2955   movq(result, FieldOperand(result, Map::kConstructorOffset));
   2956 
   2957   // All done.
   2958   bind(&done);
   2959 }
   2960 
   2961 
   2962 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   2963   if (FLAG_native_code_counters && counter->Enabled()) {
   2964     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   2965     movl(counter_operand, Immediate(value));
   2966   }
   2967 }
   2968 
   2969 
   2970 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
   2971   ASSERT(value > 0);
   2972   if (FLAG_native_code_counters && counter->Enabled()) {
   2973     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   2974     if (value == 1) {
   2975       incl(counter_operand);
   2976     } else {
   2977       addl(counter_operand, Immediate(value));
   2978     }
   2979   }
   2980 }
   2981 
   2982 
   2983 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
   2984   ASSERT(value > 0);
   2985   if (FLAG_native_code_counters && counter->Enabled()) {
   2986     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   2987     if (value == 1) {
   2988       decl(counter_operand);
   2989     } else {
   2990       subl(counter_operand, Immediate(value));
   2991     }
   2992   }
   2993 }
   2994 
   2995 
   2996 #ifdef ENABLE_DEBUGGER_SUPPORT
   2997 void MacroAssembler::DebugBreak() {
   2998   Set(rax, 0);  // No arguments.
   2999   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   3000   CEntryStub ces(1);
   3001   ASSERT(AllowThisStubCall(&ces));
   3002   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
   3003 }
   3004 #endif  // ENABLE_DEBUGGER_SUPPORT
   3005 
   3006 
   3007 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   3008   // This macro takes the dst register to make the code more readable
   3009   // at the call sites. However, the dst register has to be rcx to
   3010   // follow the calling convention which requires the call kind to be
   3011   // in rcx.
   3012   ASSERT(dst.is(rcx));
   3013   if (call_kind == CALL_AS_FUNCTION) {
   3014     LoadSmiConstant(dst, Smi::FromInt(1));
   3015   } else {
   3016     LoadSmiConstant(dst, Smi::FromInt(0));
   3017   }
   3018 }
   3019 
   3020 
   3021 void MacroAssembler::InvokeCode(Register code,
   3022                                 const ParameterCount& expected,
   3023                                 const ParameterCount& actual,
   3024                                 InvokeFlag flag,
   3025                                 const CallWrapper& call_wrapper,
   3026                                 CallKind call_kind) {
   3027   // You can't call a function without a valid frame.
   3028   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3029 
   3030   Label done;
   3031   bool definitely_mismatches = false;
   3032   InvokePrologue(expected,
   3033                  actual,
   3034                  Handle<Code>::null(),
   3035                  code,
   3036                  &done,
   3037                  &definitely_mismatches,
   3038                  flag,
   3039                  Label::kNear,
   3040                  call_wrapper,
   3041                  call_kind);
   3042   if (!definitely_mismatches) {
   3043     if (flag == CALL_FUNCTION) {
   3044       call_wrapper.BeforeCall(CallSize(code));
   3045       SetCallKind(rcx, call_kind);
   3046       call(code);
   3047       call_wrapper.AfterCall();
   3048     } else {
   3049       ASSERT(flag == JUMP_FUNCTION);
   3050       SetCallKind(rcx, call_kind);
   3051       jmp(code);
   3052     }
   3053     bind(&done);
   3054   }
   3055 }
   3056 
   3057 
   3058 void MacroAssembler::InvokeCode(Handle<Code> code,
   3059                                 const ParameterCount& expected,
   3060                                 const ParameterCount& actual,
   3061                                 RelocInfo::Mode rmode,
   3062                                 InvokeFlag flag,
   3063                                 const CallWrapper& call_wrapper,
   3064                                 CallKind call_kind) {
   3065   // You can't call a function without a valid frame.
   3066   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3067 
   3068   Label done;
   3069   bool definitely_mismatches = false;
   3070   Register dummy = rax;
   3071   InvokePrologue(expected,
   3072                  actual,
   3073                  code,
   3074                  dummy,
   3075                  &done,
   3076                  &definitely_mismatches,
   3077                  flag,
   3078                  Label::kNear,
   3079                  call_wrapper,
   3080                  call_kind);
   3081   if (!definitely_mismatches) {
   3082     if (flag == CALL_FUNCTION) {
   3083       call_wrapper.BeforeCall(CallSize(code));
   3084       SetCallKind(rcx, call_kind);
   3085       Call(code, rmode);
   3086       call_wrapper.AfterCall();
   3087     } else {
   3088       ASSERT(flag == JUMP_FUNCTION);
   3089       SetCallKind(rcx, call_kind);
   3090       Jump(code, rmode);
   3091     }
   3092     bind(&done);
   3093   }
   3094 }
   3095 
   3096 
   3097 void MacroAssembler::InvokeFunction(Register function,
   3098                                     const ParameterCount& actual,
   3099                                     InvokeFlag flag,
   3100                                     const CallWrapper& call_wrapper,
   3101                                     CallKind call_kind) {
   3102   // You can't call a function without a valid frame.
   3103   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3104 
   3105   ASSERT(function.is(rdi));
   3106   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3107   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
   3108   movsxlq(rbx,
   3109           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
   3110   // Advances rdx to the end of the Code object header, to the start of
   3111   // the executable code.
   3112   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   3113 
   3114   ParameterCount expected(rbx);
   3115   InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   3116 }
   3117 
   3118 
   3119 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   3120                                     const ParameterCount& actual,
   3121                                     InvokeFlag flag,
   3122                                     const CallWrapper& call_wrapper,
   3123                                     CallKind call_kind) {
   3124   // You can't call a function without a valid frame.
   3125   ASSERT(flag == JUMP_FUNCTION || has_frame());
   3126 
   3127   // Get the function and set up the context.
   3128   LoadHeapObject(rdi, function);
   3129   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   3130 
   3131   // We call indirectly through the code field in the function to
   3132   // allow recompilation to take effect without changing any of the
   3133   // call sites.
   3134   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   3135   ParameterCount expected(function->shared()->formal_parameter_count());
   3136   InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   3137 }
   3138 
   3139 
   3140 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
   3141                                     const ParameterCount& actual,
   3142                                     Handle<Code> code_constant,
   3143                                     Register code_register,
   3144                                     Label* done,
   3145                                     bool* definitely_mismatches,
   3146                                     InvokeFlag flag,
   3147                                     Label::Distance near_jump,
   3148                                     const CallWrapper& call_wrapper,
   3149                                     CallKind call_kind) {
   3150   bool definitely_matches = false;
   3151   *definitely_mismatches = false;
   3152   Label invoke;
   3153   if (expected.is_immediate()) {
   3154     ASSERT(actual.is_immediate());
   3155     if (expected.immediate() == actual.immediate()) {
   3156       definitely_matches = true;
   3157     } else {
   3158       Set(rax, actual.immediate());
   3159       if (expected.immediate() ==
   3160               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
   3161         // Don't worry about adapting arguments for built-ins that
   3162         // don't want that done. Skip adaptation code by making it look
   3163         // like we have a match between expected and actual number of
   3164         // arguments.
   3165         definitely_matches = true;
   3166       } else {
   3167         *definitely_mismatches = true;
   3168         Set(rbx, expected.immediate());
   3169       }
   3170     }
   3171   } else {
   3172     if (actual.is_immediate()) {
   3173       // Expected is in register, actual is immediate. This is the
   3174       // case when we invoke function values without going through the
   3175       // IC mechanism.
   3176       cmpq(expected.reg(), Immediate(actual.immediate()));
   3177       j(equal, &invoke, Label::kNear);
   3178       ASSERT(expected.reg().is(rbx));
   3179       Set(rax, actual.immediate());
   3180     } else if (!expected.reg().is(actual.reg())) {
   3181       // Both expected and actual are in (different) registers. This
   3182       // is the case when we invoke functions using call and apply.
   3183       cmpq(expected.reg(), actual.reg());
   3184       j(equal, &invoke, Label::kNear);
   3185       ASSERT(actual.reg().is(rax));
   3186       ASSERT(expected.reg().is(rbx));
   3187     }
   3188   }
   3189 
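          // Whenever a mismatch is still possible, the code above ensures that
          // rax holds the actual argument count and rbx the expected count,
          // which is the register protocol consumed by the arguments adaptor
          // called below.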
   3190   if (!definitely_matches) {
   3191     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
   3192     if (!code_constant.is_null()) {
   3193       movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
   3194       addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   3195     } else if (!code_register.is(rdx)) {
   3196       movq(rdx, code_register);
   3197     }
   3198 
   3199     if (flag == CALL_FUNCTION) {
   3200       call_wrapper.BeforeCall(CallSize(adaptor));
   3201       SetCallKind(rcx, call_kind);
   3202       Call(adaptor, RelocInfo::CODE_TARGET);
   3203       call_wrapper.AfterCall();
   3204       if (!*definitely_mismatches) {
   3205         jmp(done, near_jump);
   3206       }
   3207     } else {
   3208       SetCallKind(rcx, call_kind);
   3209       Jump(adaptor, RelocInfo::CODE_TARGET);
   3210     }
   3211     bind(&invoke);
   3212   }
   3213 }
   3214 
   3215 
   3216 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   3217   push(rbp);
   3218   movq(rbp, rsp);
   3219   push(rsi);  // Context.
   3220   Push(Smi::FromInt(type));
   3221   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   3222   push(kScratchRegister);
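          // The frame now looks like this, relative to rbp:
          //   rbp + 0 * kPointerSize : caller's frame pointer
          //   rbp - 1 * kPointerSize : context (rsi)
          //   rbp - 2 * kPointerSize : frame type marker (smi)
          //   rbp - 3 * kPointerSize : code object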
   3223   if (emit_debug_code()) {
   3224     movq(kScratchRegister,
   3225          isolate()->factory()->undefined_value(),
   3226          RelocInfo::EMBEDDED_OBJECT);
   3227     cmpq(Operand(rsp, 0), kScratchRegister);
   3228     Check(not_equal, "code object not properly patched");
   3229   }
   3230 }
   3231 
   3232 
   3233 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   3234   if (emit_debug_code()) {
   3235     Move(kScratchRegister, Smi::FromInt(type));
   3236     cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
   3237     Check(equal, "stack frame types must match");
   3238   }
   3239   movq(rsp, rbp);
   3240   pop(rbp);
   3241 }
   3242 
   3243 
   3244 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
   3245   // Set up the frame structure on the stack.
   3246   // All constants are relative to the frame pointer of the exit frame.
   3247   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   3248   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   3249   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   3250   push(rbp);
   3251   movq(rbp, rsp);
   3252 
   3253   // Reserve room for entry stack pointer and push the code object.
   3254   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   3255   push(Immediate(0));  // Saved entry sp, patched before call.
   3256   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   3257   push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
   3258 
   3259   // Save the frame pointer and the context in top.
   3260   if (save_rax) {
   3261     movq(r14, rax);  // Back up rax in a callee-saved register.
   3262   }
   3263 
   3264   Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
   3265   Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
   3266 }
   3267 
   3268 
   3269 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
   3270                                             bool save_doubles) {
   3271 #ifdef _WIN64
   3272   const int kShadowSpace = 4;
   3273   arg_stack_space += kShadowSpace;
   3274 #endif
   3275   // Optionally save all XMM registers.
   3276   if (save_doubles) {
   3277     int space = XMMRegister::kNumRegisters * kDoubleSize +
   3278         arg_stack_space * kPointerSize;
   3279     subq(rsp, Immediate(space));
   3280     int offset = -2 * kPointerSize;
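            // The XMM save area sits just below the two slots pushed by the
            // prologue (saved entry sp and code object); register i is stored
            // at rbp + offset - (i + 1) * kDoubleSize.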
   3281     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
   3282       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
   3283       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
   3284     }
   3285   } else if (arg_stack_space > 0) {
   3286     subq(rsp, Immediate(arg_stack_space * kPointerSize));
   3287   }
   3288 
   3289   // Get the required frame alignment for the OS.
   3290   const int kFrameAlignment = OS::ActivationFrameAlignment();
   3291   if (kFrameAlignment > 0) {
   3292     ASSERT(IsPowerOf2(kFrameAlignment));
   3293     ASSERT(is_int8(kFrameAlignment));
   3294     and_(rsp, Immediate(-kFrameAlignment));
   3295   }
   3296 
   3297   // Patch the saved entry sp.
   3298   movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
   3299 }
   3300 
   3301 
   3302 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   3303   EnterExitFramePrologue(true);
   3304 
   3305   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
   3306   // so it must be retained across the C-call.
   3307   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
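          // r14 holds the argument count saved from rax in
          // EnterExitFramePrologue, so this computes an argv pointer into the
          // caller's argument area.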
   3308   lea(r15, Operand(rbp, r14, times_pointer_size, offset));
   3309 
   3310   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
   3311 }
   3312 
   3313 
   3314 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
   3315   EnterExitFramePrologue(false);
   3316   EnterExitFrameEpilogue(arg_stack_space, false);
   3317 }
   3318 
   3319 
   3320 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   3321   // Registers:
   3322   // r15 : argv
   3323   if (save_doubles) {
   3324     int offset = -2 * kPointerSize;
   3325     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
   3326       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
   3327       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
   3328     }
   3329   }
   3330   // Get the return address from the stack and restore the frame pointer.
   3331   movq(rcx, Operand(rbp, 1 * kPointerSize));
   3332   movq(rbp, Operand(rbp, 0 * kPointerSize));
   3333 
   3334   // Drop everything up to and including the arguments and the receiver
   3335   // from the caller stack.
   3336   lea(rsp, Operand(r15, 1 * kPointerSize));
   3337 
   3338   // Push the return address to get ready to return.
   3339   push(rcx);
   3340 
   3341   LeaveExitFrameEpilogue();
   3342 }
   3343 
   3344 
   3345 void MacroAssembler::LeaveApiExitFrame() {
   3346   movq(rsp, rbp);
   3347   pop(rbp);
   3348 
   3349   LeaveExitFrameEpilogue();
   3350 }
   3351 
   3352 
   3353 void MacroAssembler::LeaveExitFrameEpilogue() {
   3354   // Restore current context from top and clear it in debug mode.
   3355   ExternalReference context_address(Isolate::kContextAddress, isolate());
   3356   Operand context_operand = ExternalOperand(context_address);
   3357   movq(rsi, context_operand);
   3358 #ifdef DEBUG
   3359   movq(context_operand, Immediate(0));
   3360 #endif
   3361 
   3362   // Clear the top frame.
   3363   ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
   3364                                        isolate());
   3365   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
   3366   movq(c_entry_fp_operand, Immediate(0));
   3367 }
   3368 
   3369 
   3370 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   3371                                             Register scratch,
   3372                                             Label* miss) {
   3373   Label same_contexts;
   3374 
   3375   ASSERT(!holder_reg.is(scratch));
   3376   ASSERT(!scratch.is(kScratchRegister));
   3377   // Load current lexical context from the stack frame.
   3378   movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
   3379 
   3380   // When generating debug code, make sure the lexical context is set.
   3381   if (emit_debug_code()) {
   3382     cmpq(scratch, Immediate(0));
   3383     Check(not_equal, "we should not have an empty lexical context");
   3384   }
   3385   // Load the global context of the current context.
   3386   int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   3387   movq(scratch, FieldOperand(scratch, offset));
   3388   movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
   3389 
   3390   // Check the context is a global context.
   3391   if (emit_debug_code()) {
   3392     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
   3393         isolate()->factory()->global_context_map());
   3394     Check(equal, "JSGlobalObject::global_context should be a global context.");
   3395   }
   3396 
   3397   // Check if both contexts are the same.
   3398   cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
   3399   j(equal, &same_contexts);
   3400 
   3401   // Compare security tokens.
   3402   // Check that the security token in the calling global object is
   3403   // compatible with the security token in the receiving global
   3404   // object.
   3405 
   3406   // Check the context is a global context.
   3407   if (emit_debug_code()) {
   3408     // Preserve original value of holder_reg.
   3409     push(holder_reg);
   3410     movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
   3411     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
   3412     Check(not_equal, "JSGlobalProxy::context() should not be null.");
   3413 
   3414     // Read the first word and compare it to global_context_map().
   3415     movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
   3416     CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
   3417     Check(equal, "JSGlobalObject::global_context should be a global context.");
   3418     pop(holder_reg);
   3419   }
   3420 
   3421   movq(kScratchRegister,
   3422        FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
   3423   int token_offset =
   3424       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   3425   movq(scratch, FieldOperand(scratch, token_offset));
   3426   cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
   3427   j(not_equal, miss);
   3428 
   3429   bind(&same_contexts);
   3430 }
   3431 
   3432 
   3433 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   3434   // First of all we assign the hash seed to scratch.
   3435   LoadRoot(scratch, Heap::kHashSeedRootIndex);
   3436   SmiToInteger32(scratch, scratch);
   3437 
   3438   // Xor original key with a seed.
   3439   xorl(r0, scratch);
   3440 
   3441   // Compute the hash code from the untagged key.  This must be kept in sync
   3442   // with ComputeIntegerHash in utils.h.
   3443   //
   3444   // hash = ~hash + (hash << 15);
   3445   movl(scratch, r0);
   3446   notl(r0);
   3447   shll(scratch, Immediate(15));
   3448   addl(r0, scratch);
   3449   // hash = hash ^ (hash >> 12);
   3450   movl(scratch, r0);
   3451   shrl(scratch, Immediate(12));
   3452   xorl(r0, scratch);
   3453   // hash = hash + (hash << 2);
   3454   leal(r0, Operand(r0, r0, times_4, 0));
   3455   // hash = hash ^ (hash >> 4);
   3456   movl(scratch, r0);
   3457   shrl(scratch, Immediate(4));
   3458   xorl(r0, scratch);
   3459   // hash = hash * 2057;
   3460   imull(r0, r0, Immediate(2057));
   3461   // hash = hash ^ (hash >> 16);
   3462   movl(scratch, r0);
   3463   shrl(scratch, Immediate(16));
   3464   xorl(r0, scratch);
   3465 }
   3466 
   3467 
   3468 
   3469 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   3470                                               Register elements,
   3471                                               Register key,
   3472                                               Register r0,
   3473                                               Register r1,
   3474                                               Register r2,
   3475                                               Register result) {
   3476   // Register use:
   3477   //
   3478   // elements - holds the slow-case elements of the receiver on entry.
   3479   //            Unchanged unless 'result' is the same register.
   3480   //
   3481   // key      - holds the smi key on entry.
   3482   //            Unchanged unless 'result' is the same register.
   3483   //
   3484   // Scratch registers:
   3485   //
   3486   // r0 - holds the untagged key on entry and holds the hash once computed.
   3487   //
   3488   // r1 - used to hold the capacity mask of the dictionary
   3489   //
   3490   // r2 - used for the index into the dictionary.
   3491   //
   3492   // result - holds the result on exit if the load succeeded.
   3493   //          Allowed to be the same as 'elements' or 'key'.
   3494   //          Unchanged on bailout so 'elements' or 'key' can be used
   3495   //          in further computation.
   3496 
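          // Each dictionary entry occupies three pointers starting at
          // kElementsStartOffset: key, value and details.  This is why the
          // probe index is scaled by three, and why the value and details
          // fields live one and two pointers past the entry start.
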
   3497   Label done;
   3498 
   3499   GetNumberHash(r0, r1);
   3500 
   3501   // Compute capacity mask.
   3502   SmiToInteger32(r1, FieldOperand(elements,
   3503                                   SeededNumberDictionary::kCapacityOffset));
   3504   decl(r1);
   3505 
   3506   // Generate an unrolled loop that performs a few probes before giving up.
   3507   const int kProbes = 4;
   3508   for (int i = 0; i < kProbes; i++) {
   3509     // Use r2 for index calculations and keep the hash intact in r0.
   3510     movq(r2, r0);
   3511     // Compute the masked index: (hash + i + i * i) & mask.
   3512     if (i > 0) {
   3513       addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
   3514     }
   3515     and_(r2, r1);
   3516 
   3517     // Scale the index by multiplying by the entry size.
   3518     ASSERT(SeededNumberDictionary::kEntrySize == 3);
   3519     lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
   3520 
   3521     // Check if the key matches.
   3522     cmpq(key, FieldOperand(elements,
   3523                            r2,
   3524                            times_pointer_size,
   3525                            SeededNumberDictionary::kElementsStartOffset));
   3526     if (i != (kProbes - 1)) {
   3527       j(equal, &done);
   3528     } else {
   3529       j(not_equal, miss);
   3530     }
   3531   }
   3532 
   3533   bind(&done);
   3534   // Check that the value is a normal property.
   3535   const int kDetailsOffset =
   3536       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   3537   ASSERT_EQ(NORMAL, 0);
   3538   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
   3539        Smi::FromInt(PropertyDetails::TypeField::kMask));
   3540   j(not_zero, miss);
   3541 
   3542   // Get the value at the masked, scaled index.
   3543   const int kValueOffset =
   3544       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   3545   movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
   3546 }
   3547 
   3548 
   3549 void MacroAssembler::LoadAllocationTopHelper(Register result,
   3550                                              Register scratch,
   3551                                              AllocationFlags flags) {
   3552   ExternalReference new_space_allocation_top =
   3553       ExternalReference::new_space_allocation_top_address(isolate());
   3554 
   3555   // Just return if allocation top is already known.
   3556   if ((flags & RESULT_CONTAINS_TOP) != 0) {
   3557     // No use of scratch if allocation top is provided.
   3558     ASSERT(!scratch.is_valid());
   3559 #ifdef DEBUG
   3560     // Assert that result actually contains top on entry.
   3561     Operand top_operand = ExternalOperand(new_space_allocation_top);
   3562     cmpq(result, top_operand);
   3563     Check(equal, "Unexpected allocation top");
   3564 #endif
   3565     return;
   3566   }
   3567 
   3568   // Move address of new object to result. Use scratch register if available,
   3569   // and keep address in scratch until call to UpdateAllocationTopHelper.
   3570   if (scratch.is_valid()) {
   3571     LoadAddress(scratch, new_space_allocation_top);
   3572     movq(result, Operand(scratch, 0));
   3573   } else {
   3574     Load(result, new_space_allocation_top);
   3575   }
   3576 }
   3577 
   3578 
   3579 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
   3580                                                Register scratch) {
   3581   if (emit_debug_code()) {
   3582     testq(result_end, Immediate(kObjectAlignmentMask));
   3583     Check(zero, "Unaligned allocation in new space");
   3584   }
   3585 
   3586   ExternalReference new_space_allocation_top =
   3587       ExternalReference::new_space_allocation_top_address(isolate());
   3588 
   3589   // Update new top.
   3590   if (scratch.is_valid()) {
   3591     // Scratch already contains address of allocation top.
   3592     movq(Operand(scratch, 0), result_end);
   3593   } else {
   3594     Store(new_space_allocation_top, result_end);
   3595   }
   3596 }
   3597 
   3598 
   3599 void MacroAssembler::AllocateInNewSpace(int object_size,
   3600                                         Register result,
   3601                                         Register result_end,
   3602                                         Register scratch,
   3603                                         Label* gc_required,
   3604                                         AllocationFlags flags) {
   3605   if (!FLAG_inline_new) {
   3606     if (emit_debug_code()) {
   3607       // Trash the registers to simulate an allocation failure.
   3608       movl(result, Immediate(0x7091));
   3609       if (result_end.is_valid()) {
   3610         movl(result_end, Immediate(0x7191));
   3611       }
   3612       if (scratch.is_valid()) {
   3613         movl(scratch, Immediate(0x7291));
   3614       }
   3615     }
   3616     jmp(gc_required);
   3617     return;
   3618   }
   3619   ASSERT(!result.is(result_end));
   3620 
   3621   // Load address of new object into result.
   3622   LoadAllocationTopHelper(result, scratch, flags);
   3623 
   3624   // Calculate new top and bail out if new space is exhausted.
   3625   ExternalReference new_space_allocation_limit =
   3626       ExternalReference::new_space_allocation_limit_address(isolate());
   3627 
   3628   Register top_reg = result_end.is_valid() ? result_end : result;
   3629 
   3630   if (!top_reg.is(result)) {
   3631     movq(top_reg, result);
   3632   }
   3633   addq(top_reg, Immediate(object_size));
   3634   j(carry, gc_required);
   3635   Operand limit_operand = ExternalOperand(new_space_allocation_limit);
   3636   cmpq(top_reg, limit_operand);
   3637   j(above, gc_required);
   3638 
   3639   // Update allocation top.
   3640   UpdateAllocationTopHelper(top_reg, scratch);
   3641 
   3642   if (top_reg.is(result)) {
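            // result currently holds the new allocation top; move it back down
            // to the start of the object just allocated, folding in the heap
            // object tag when requested.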
   3643     if ((flags & TAG_OBJECT) != 0) {
   3644       subq(result, Immediate(object_size - kHeapObjectTag));
   3645     } else {
   3646       subq(result, Immediate(object_size));
   3647     }
   3648   } else if ((flags & TAG_OBJECT) != 0) {
   3649     // Tag the result if requested.
   3650     addq(result, Immediate(kHeapObjectTag));
   3651   }
   3652 }
   3653 
   3654 
   3655 void MacroAssembler::AllocateInNewSpace(int header_size,
   3656                                         ScaleFactor element_size,
   3657                                         Register element_count,
   3658                                         Register result,
   3659                                         Register result_end,
   3660                                         Register scratch,
   3661                                         Label* gc_required,
   3662                                         AllocationFlags flags) {
   3663   if (!FLAG_inline_new) {
   3664     if (emit_debug_code()) {
   3665       // Trash the registers to simulate an allocation failure.
   3666       movl(result, Immediate(0x7091));
   3667       movl(result_end, Immediate(0x7191));
   3668       if (scratch.is_valid()) {
   3669         movl(scratch, Immediate(0x7291));
   3670       }
   3671       // Register element_count is not modified by the function.
   3672     }
   3673     jmp(gc_required);
   3674     return;
   3675   }
   3676   ASSERT(!result.is(result_end));
   3677 
   3678   // Load address of new object into result.
   3679   LoadAllocationTopHelper(result, scratch, flags);
   3680 
   3681   // Calculate new top and bail out if new space is exhausted.
   3682   ExternalReference new_space_allocation_limit =
   3683       ExternalReference::new_space_allocation_limit_address(isolate());
   3684 
   3685   // We assume that element_count*element_size + header_size does not
   3686   // overflow.
   3687   lea(result_end, Operand(element_count, element_size, header_size));
   3688   addq(result_end, result);
   3689   j(carry, gc_required);
   3690   Operand limit_operand = ExternalOperand(new_space_allocation_limit);
   3691   cmpq(result_end, limit_operand);
   3692   j(above, gc_required);
   3693 
   3694   // Update allocation top.
   3695   UpdateAllocationTopHelper(result_end, scratch);
   3696 
   3697   // Tag the result if requested.
   3698   if ((flags & TAG_OBJECT) != 0) {
   3699     addq(result, Immediate(kHeapObjectTag));
   3700   }
   3701 }
   3702 
   3703 
   3704 void MacroAssembler::AllocateInNewSpace(Register object_size,
   3705                                         Register result,
   3706                                         Register result_end,
   3707                                         Register scratch,
   3708                                         Label* gc_required,
   3709                                         AllocationFlags flags) {
   3710   if (!FLAG_inline_new) {
   3711     if (emit_debug_code()) {
   3712       // Trash the registers to simulate an allocation failure.
   3713       movl(result, Immediate(0x7091));
   3714       movl(result_end, Immediate(0x7191));
   3715       if (scratch.is_valid()) {
   3716         movl(scratch, Immediate(0x7291));
   3717       }
   3718       // object_size is left unchanged by this function.
   3719     }
   3720     jmp(gc_required);
   3721     return;
   3722   }
   3723   ASSERT(!result.is(result_end));
   3724 
   3725   // Load address of new object into result.
   3726   LoadAllocationTopHelper(result, scratch, flags);
   3727 
   3728   // Calculate new top and bail out if new space is exhausted.
   3729   ExternalReference new_space_allocation_limit =
   3730       ExternalReference::new_space_allocation_limit_address(isolate());
   3731   if (!object_size.is(result_end)) {
   3732     movq(result_end, object_size);
   3733   }
   3734   addq(result_end, result);
   3735   j(carry, gc_required);
   3736   Operand limit_operand = ExternalOperand(new_space_allocation_limit);
   3737   cmpq(result_end, limit_operand);
   3738   j(above, gc_required);
   3739 
   3740   // Update allocation top.
   3741   UpdateAllocationTopHelper(result_end, scratch);
   3742 
   3743   // Tag the result if requested.
   3744   if ((flags & TAG_OBJECT) != 0) {
   3745     addq(result, Immediate(kHeapObjectTag));
   3746   }
   3747 }
   3748 
   3749 
   3750 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
   3751   ExternalReference new_space_allocation_top =
   3752       ExternalReference::new_space_allocation_top_address(isolate());
   3753 
   3754   // Make sure the object has no tag before resetting top.
   3755   and_(object, Immediate(~kHeapObjectTagMask));
   3756   Operand top_operand = ExternalOperand(new_space_allocation_top);
   3757 #ifdef DEBUG
   3758   cmpq(object, top_operand);
   3759   Check(below, "Undo allocation of non allocated memory");
   3760 #endif
   3761   movq(top_operand, object);
   3762 }
   3763 
   3764 
   3765 void MacroAssembler::AllocateHeapNumber(Register result,
   3766                                         Register scratch,
   3767                                         Label* gc_required) {
   3768   // Allocate heap number in new space.
   3769   AllocateInNewSpace(HeapNumber::kSize,
   3770                      result,
   3771                      scratch,
   3772                      no_reg,
   3773                      gc_required,
   3774                      TAG_OBJECT);
   3775 
   3776   // Set the map.
   3777   LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
   3778   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3779 }
   3780 
   3781 
   3782 void MacroAssembler::AllocateTwoByteString(Register result,
   3783                                            Register length,
   3784                                            Register scratch1,
   3785                                            Register scratch2,
   3786                                            Register scratch3,
   3787                                            Label* gc_required) {
   3788   // Calculate the number of bytes needed for the characters in the string while
   3789   // observing object alignment.
   3790   const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
   3791                                kObjectAlignmentMask;
   3792   ASSERT(kShortSize == 2);
   3793   // scratch1 = length * 2 + kObjectAlignmentMask.
   3794   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
   3795                 kHeaderAlignment));
   3796   and_(scratch1, Immediate(~kObjectAlignmentMask));
   3797   if (kHeaderAlignment > 0) {
   3798     subq(scratch1, Immediate(kHeaderAlignment));
   3799   }
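          // Adding kHeaderAlignment before rounding and subtracting it again
          // afterwards compensates for a header size that is not itself a
          // multiple of kObjectAlignment, so header plus character data still
          // rounds up to an aligned total size.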
   3800 
   3801   // Allocate two byte string in new space.
   3802   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
   3803                      times_1,
   3804                      scratch1,
   3805                      result,
   3806                      scratch2,
   3807                      scratch3,
   3808                      gc_required,
   3809                      TAG_OBJECT);
   3810 
   3811   // Set the map, length and hash field.
   3812   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
   3813   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3814   Integer32ToSmi(scratch1, length);
   3815   movq(FieldOperand(result, String::kLengthOffset), scratch1);
   3816   movq(FieldOperand(result, String::kHashFieldOffset),
   3817        Immediate(String::kEmptyHashField));
   3818 }
   3819 
   3820 
   3821 void MacroAssembler::AllocateAsciiString(Register result,
   3822                                          Register length,
   3823                                          Register scratch1,
   3824                                          Register scratch2,
   3825                                          Register scratch3,
   3826                                          Label* gc_required) {
   3827   // Calculate the number of bytes needed for the characters in the string while
   3828   // observing object alignment.
   3829   const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
   3830                                kObjectAlignmentMask;
   3831   movl(scratch1, length);
   3832   ASSERT(kCharSize == 1);
   3833   addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
   3834   and_(scratch1, Immediate(~kObjectAlignmentMask));
   3835   if (kHeaderAlignment > 0) {
   3836     subq(scratch1, Immediate(kHeaderAlignment));
   3837   }
   3838 
   3839   // Allocate ASCII string in new space.
   3840   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
   3841                      times_1,
   3842                      scratch1,
   3843                      result,
   3844                      scratch2,
   3845                      scratch3,
   3846                      gc_required,
   3847                      TAG_OBJECT);
   3848 
   3849   // Set the map, length and hash field.
   3850   LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
   3851   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3852   Integer32ToSmi(scratch1, length);
   3853   movq(FieldOperand(result, String::kLengthOffset), scratch1);
   3854   movq(FieldOperand(result, String::kHashFieldOffset),
   3855        Immediate(String::kEmptyHashField));
   3856 }
   3857 
   3858 
   3859 void MacroAssembler::AllocateTwoByteConsString(Register result,
   3860                                         Register scratch1,
   3861                                         Register scratch2,
   3862                                         Label* gc_required) {
   3863   // Allocate cons string in new space.
   3864   AllocateInNewSpace(ConsString::kSize,
   3865                      result,
   3866                      scratch1,
   3867                      scratch2,
   3868                      gc_required,
   3869                      TAG_OBJECT);
   3870 
   3871   // Set the map. The other fields are left uninitialized.
   3872   LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
   3873   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3874 }
   3875 
   3876 
   3877 void MacroAssembler::AllocateAsciiConsString(Register result,
   3878                                              Register scratch1,
   3879                                              Register scratch2,
   3880                                              Label* gc_required) {
   3881   // Allocate ASCII cons string in new space.
   3882   AllocateInNewSpace(ConsString::kSize,
   3883                      result,
   3884                      scratch1,
   3885                      scratch2,
   3886                      gc_required,
   3887                      TAG_OBJECT);
   3888 
   3889   // Set the map. The other fields are left uninitialized.
   3890   LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
   3891   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3892 }
   3893 
   3894 
   3895 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
   3896                                           Register scratch1,
   3897                                           Register scratch2,
   3898                                           Label* gc_required) {
   3899   // Allocate sliced string in new space.
   3900   AllocateInNewSpace(SlicedString::kSize,
   3901                      result,
   3902                      scratch1,
   3903                      scratch2,
   3904                      gc_required,
   3905                      TAG_OBJECT);
   3906 
   3907   // Set the map. The other fields are left uninitialized.
   3908   LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
   3909   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3910 }
   3911 
   3912 
   3913 void MacroAssembler::AllocateAsciiSlicedString(Register result,
   3914                                                Register scratch1,
   3915                                                Register scratch2,
   3916                                                Label* gc_required) {
   3917   // Allocate ASCII sliced string in new space.
   3918   AllocateInNewSpace(SlicedString::kSize,
   3919                      result,
   3920                      scratch1,
   3921                      scratch2,
   3922                      gc_required,
   3923                      TAG_OBJECT);
   3924 
   3925   // Set the map. The other fields are left uninitialized.
   3926   LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
   3927   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   3928 }
   3929 
   3930 
   3931 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
   3932 // long or aligned copies.  The contents of scratch and length are destroyed.
   3933 // Destination is incremented by length; source, length and scratch are
   3934 // clobbered.
   3935 // A simpler loop is faster on small copies, but slower on large ones.
   3936 // The cld() instruction must have been emitted, to set the direction flag,
   3937 // before calling this function.
   3938 void MacroAssembler::CopyBytes(Register destination,
   3939                                Register source,
   3940                                Register length,
   3941                                int min_length,
   3942                                Register scratch) {
   3943   ASSERT(min_length >= 0);
   3944   if (FLAG_debug_code) {
   3945     cmpl(length, Immediate(min_length));
   3946     Assert(greater_equal, "Invalid min_length");
   3947   }
   3948   Label loop, done, short_string, short_loop;
   3949 
   3950   const int kLongStringLimit = 20;
   3951   if (min_length <= kLongStringLimit) {
   3952     cmpl(length, Immediate(kLongStringLimit));
   3953     j(less_equal, &short_string);
   3954   }
   3955 
   3956   ASSERT(source.is(rsi));
   3957   ASSERT(destination.is(rdi));
   3958   ASSERT(length.is(rcx));
   3959 
   3960   // Because source is 8-byte aligned in our uses of this function,
   3961   // we keep source aligned for the rep movs operation by copying the odd bytes
   3962   // at the end of the ranges.
   3963   movq(scratch, length);
   3964   shrl(length, Immediate(3));
   3965   repmovsq();
   3966   // Move the remaining (length & 0x7) bytes.
   3967   andl(scratch, Immediate(0x7));
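          // Copy the trailing bytes as one quadword ending exactly at
          // source + remainder.  It may overlap bytes already moved by
          // rep movsq; rewriting them is harmless.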
   3968   movq(length, Operand(source, scratch, times_1, -8));
   3969   movq(Operand(destination, scratch, times_1, -8), length);
   3970   addq(destination, scratch);
   3971 
   3972   if (min_length <= kLongStringLimit) {
   3973     jmp(&done);
   3974 
   3975     bind(&short_string);
   3976     if (min_length == 0) {
   3977       testl(length, length);
   3978       j(zero, &done);
   3979     }
   3980     lea(scratch, Operand(destination, length, times_1, 0));
   3981 
   3982     bind(&short_loop);
   3983     movb(length, Operand(source, 0));
   3984     movb(Operand(destination, 0), length);
   3985     incq(source);
   3986     incq(destination);
   3987     cmpq(destination, scratch);
   3988     j(not_equal, &short_loop);
   3989 
   3990     bind(&done);
   3991   }
   3992 }
   3993 
   3994 
   3995 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
   3996                                                 Register end_offset,
   3997                                                 Register filler) {
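          // Equivalent to:
          //   for (p = start_offset; p < end_offset; p += kPointerSize)
          //     *p = filler;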
   3998   Label loop, entry;
   3999   jmp(&entry);
   4000   bind(&loop);
   4001   movq(Operand(start_offset, 0), filler);
   4002   addq(start_offset, Immediate(kPointerSize));
   4003   bind(&entry);
   4004   cmpq(start_offset, end_offset);
   4005   j(less, &loop);
   4006 }
   4007 
   4008 
   4009 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   4010   if (context_chain_length > 0) {
   4011     // Move up the chain of contexts to the context containing the slot.
   4012     movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   4013     for (int i = 1; i < context_chain_length; i++) {
   4014       movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   4015     }
   4016   } else {
   4017     // Slot is in the current function context.  Move it into the
   4018     // destination register in case we store into it (the write barrier
   4019     // cannot be allowed to destroy the context in rsi).
   4020     movq(dst, rsi);
   4021   }
   4022 
   4023   // We should not have found a with context by walking the context
   4024   // chain (i.e., the static scope chain and runtime context chain do
   4025   // not agree).  A variable occurring in such a scope should have
   4026   // slot type LOOKUP and not CONTEXT.
   4027   if (emit_debug_code()) {
   4028     CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
   4029                 Heap::kWithContextMapRootIndex);
   4030     Check(not_equal, "Variable resolved to with context.");
   4031   }
   4032 }
   4033 
   4034 
   4035 void MacroAssembler::LoadTransitionedArrayMapConditional(
   4036     ElementsKind expected_kind,
   4037     ElementsKind transitioned_kind,
   4038     Register map_in_out,
   4039     Register scratch,
   4040     Label* no_map_match) {
   4041   // Load the global or builtins object from the current context.
   4042   movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   4043   movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
   4044 
   4045   // Check that the function's map is the same as the expected cached map.
   4046   int expected_index =
   4047       Context::GetContextMapIndexFromElementsKind(expected_kind);
   4048   cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
   4049   j(not_equal, no_map_match);
   4050 
   4051   // Use the transitioned cached map.
   4052   int trans_index =
   4053       Context::GetContextMapIndexFromElementsKind(transitioned_kind);
   4054   movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
   4055 }
   4056 
   4057 
   4058 void MacroAssembler::LoadInitialArrayMap(
   4059     Register function_in, Register scratch, Register map_out) {
   4060   ASSERT(!function_in.is(map_out));
   4061   Label done;
   4062   movq(map_out, FieldOperand(function_in,
   4063                              JSFunction::kPrototypeOrInitialMapOffset));
   4064   if (!FLAG_smi_only_arrays) {
   4065     LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
   4066                                         FAST_ELEMENTS,
   4067                                         map_out,
   4068                                         scratch,
   4069                                         &done);
   4070   }
   4071   bind(&done);
   4072 }
   4073 
   4074 #ifdef _WIN64
   4075 static const int kRegisterPassedArguments = 4;
   4076 #else
   4077 static const int kRegisterPassedArguments = 6;
   4078 #endif
   4079 
   4080 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   4081   // Load the global or builtins object from the current context.
   4082   movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   4083   // Load the global context from the global or builtins object.
   4084   movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
   4085   // Load the function from the global context.
   4086   movq(function, Operand(function, Context::SlotOffset(index)));
   4087 }
   4088 
   4089 
   4090 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   4091                                                   Register map) {
   4092   // Load the initial map.  The global functions all have initial maps.
   4093   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   4094   if (emit_debug_code()) {
   4095     Label ok, fail;
   4096     CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
   4097     jmp(&ok);
   4098     bind(&fail);
   4099     Abort("Global functions must have initial map");
   4100     bind(&ok);
   4101   }
   4102 }
   4103 
   4104 
   4105 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
   4106   // On Windows 64 stack slots are reserved by the caller for all arguments
   4107   // including the ones passed in registers, and space is always allocated for
   4108   // the four register arguments even if the function takes fewer than four
   4109   // arguments.
   4110   // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
   4111   // and the caller does not reserve stack slots for them.
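          // For example, a call taking six arguments needs six stack slots on
          // Windows (four of them shadow slots for the register arguments) but
          // none on the AMD64 ABI, where all six are passed in registers.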
   4112   ASSERT(num_arguments >= 0);
   4113 #ifdef _WIN64
   4114   const int kMinimumStackSlots = kRegisterPassedArguments;
   4115   if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
   4116   return num_arguments;
   4117 #else
   4118   if (num_arguments < kRegisterPassedArguments) return 0;
   4119   return num_arguments - kRegisterPassedArguments;
   4120 #endif
   4121 }
   4122 
   4123 
   4124 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
   4125   int frame_alignment = OS::ActivationFrameAlignment();
   4126   ASSERT(frame_alignment != 0);
   4127   ASSERT(num_arguments >= 0);
   4128 
   4129   // Make stack end at alignment and allocate space for arguments and old rsp.
   4130   movq(kScratchRegister, rsp);
   4131   ASSERT(IsPowerOf2(frame_alignment));
   4132   int argument_slots_on_stack =
   4133       ArgumentStackSlotsForCFunctionCall(num_arguments);
   4134   subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
   4135   and_(rsp, Immediate(-frame_alignment));
   4136   movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
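          // The stack now looks like this, with rsp aligned:
          //   rsp + argument_slots_on_stack * kPointerSize : the original rsp
          //     (reloaded by CallCFunction after the call returns)
          //   rsp .. rsp + (argument_slots_on_stack - 1) * kPointerSize :
          //     slots for stack-passed arguments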
   4137 }
   4138 
   4139 
   4140 void MacroAssembler::CallCFunction(ExternalReference function,
   4141                                    int num_arguments) {
   4142   LoadAddress(rax, function);
   4143   CallCFunction(rax, num_arguments);
   4144 }
   4145 
   4146 
   4147 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   4148   ASSERT(has_frame());
   4149   // Check stack alignment.
   4150   if (emit_debug_code()) {
   4151     CheckStackAlignment();
   4152   }
   4153 
   4154   call(function);
   4155   ASSERT(OS::ActivationFrameAlignment() != 0);
   4156   ASSERT(num_arguments >= 0);
   4157   int argument_slots_on_stack =
   4158       ArgumentStackSlotsForCFunctionCall(num_arguments);
   4159   movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
   4160 }
   4161 
   4162 
   4163 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
   4164   if (r1.is(r2)) return true;
   4165   if (r1.is(r3)) return true;
   4166   if (r1.is(r4)) return true;
   4167   if (r2.is(r3)) return true;
   4168   if (r2.is(r4)) return true;
   4169   if (r3.is(r4)) return true;
   4170   return false;
   4171 }
   4172 
   4173 
   4174 CodePatcher::CodePatcher(byte* address, int size)
   4175     : address_(address),
   4176       size_(size),
   4177       masm_(Isolate::Current(), address, size + Assembler::kGap) {
   4178   // Create a new macro assembler pointing to the address of the code to patch.
   4179   // The size is adjusted with kGap in order for the assembler to generate size
   4180   // bytes of instructions without failing with buffer size constraints.
   4181   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   4182 }
   4183 
   4184 
   4185 CodePatcher::~CodePatcher() {
   4186   // Indicate that code has changed.
   4187   CPU::FlushICache(address_, size_);
   4188 
   4189   // Check that the code was patched as expected.
   4190   ASSERT(masm_.pc_ == address_ + size_);
   4191   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   4192 }
   4193 
   4194 
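        // Tests a flag bit of the MemoryChunk header for the page containing
        // 'object' and branches when the condition holds.  An illustrative
        // (hypothetical) use, branching when the page's flag bit is set:
        //   CheckPageFlag(object, scratch,
        //                 MemoryChunk::kPointersToHereAreInterestingMask,
        //                 not_zero, &interesting);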
   4195 void MacroAssembler::CheckPageFlag(
   4196     Register object,
   4197     Register scratch,
   4198     int mask,
   4199     Condition cc,
   4200     Label* condition_met,
   4201     Label::Distance condition_met_distance) {
   4202   ASSERT(cc == zero || cc == not_zero);
   4203   if (scratch.is(object)) {
   4204     and_(scratch, Immediate(~Page::kPageAlignmentMask));
   4205   } else {
   4206     movq(scratch, Immediate(~Page::kPageAlignmentMask));
   4207     and_(scratch, object);
   4208   }
   4209   if (mask < (1 << kBitsPerByte)) {
   4210     testb(Operand(scratch, MemoryChunk::kFlagsOffset),
   4211           Immediate(static_cast<uint8_t>(mask)));
   4212   } else {
   4213     testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   4214   }
   4215   j(cc, condition_met, condition_met_distance);
   4216 }
   4217 
   4218 
   4219 void MacroAssembler::JumpIfBlack(Register object,
   4220                                  Register bitmap_scratch,
   4221                                  Register mask_scratch,
   4222                                  Label* on_black,
   4223                                  Label::Distance on_black_distance) {
   4224   ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
   4225   GetMarkBits(object, bitmap_scratch, mask_scratch);
   4226 
   4227   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   4228   // The mask_scratch register contains a 1 at the position of the first bit
   4229   // and a 0 at all other positions, including the position of the second bit.
   4230   movq(rcx, mask_scratch);
   4231   // Make rcx into a mask that covers both marking bits using the operation
   4232   // rcx = mask | (mask << 1).
   4233   lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
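          // (mask * 3 == mask | (mask << 1), because mask_scratch has exactly
          // one bit set.)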
   4234   // Note that we are using a 4-byte aligned 8-byte load.
   4235   and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
   4236   cmpq(mask_scratch, rcx);
   4237   j(equal, on_black, on_black_distance);
   4238 }
   4239 
   4240 
   4241 // Detect some, but not all, common pointer-free objects.  This is used by the
   4242 // incremental write barrier which doesn't care about oddballs (they are always
   4243 // marked black immediately so this code is not hit).
   4244 void MacroAssembler::JumpIfDataObject(
   4245     Register value,
   4246     Register scratch,
   4247     Label* not_data_object,
   4248     Label::Distance not_data_object_distance) {
   4249   Label is_data_object;
   4250   movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
   4251   CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
   4252   j(equal, &is_data_object, Label::kNear);
   4253   ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
   4254   ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   4255   // If it's a string and it's not a cons string then it's an object containing
   4256   // no GC pointers.
   4257   testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
   4258         Immediate(kIsIndirectStringMask | kIsNotStringMask));
   4259   j(not_zero, not_data_object, not_data_object_distance);
   4260   bind(&is_data_object);
   4261 }
   4262 
   4263 
   4264 void MacroAssembler::GetMarkBits(Register addr_reg,
   4265                                  Register bitmap_reg,
   4266                                  Register mask_reg) {
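          // Conceptually, this splits addr_reg into a page-aligned bitmap base
          // plus a cell offset, leaving in mask_reg the single-bit mask
          //   1 << ((addr_reg >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1))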
   4267   ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
   4268   movq(bitmap_reg, addr_reg);
   4269   // Sign-extended 32-bit immediate.
   4270   and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
   4271   movq(rcx, addr_reg);
   4272   int shift =
   4273       Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
   4274   shrl(rcx, Immediate(shift));
   4275   and_(rcx,
   4276        Immediate((Page::kPageAlignmentMask >> shift) &
   4277                  ~(Bitmap::kBytesPerCell - 1)));
   4278 
   4279   addq(bitmap_reg, rcx);
   4280   movq(rcx, addr_reg);
   4281   shrl(rcx, Immediate(kPointerSizeLog2));
   4282   and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
   4283   movl(mask_reg, Immediate(1));
   4284   shl_cl(mask_reg);
   4285 }
   4286 
   4287 
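// Editor's note (illustration only): a C++ sketch of the address arithmetic
// GetMarkBits emits, assuming 64-bit pointers (kPointerSizeLog2 == 3) and
// 32-bit bitmap cells (Bitmap::kBitsPerCellLog2 == 5,
// Bitmap::kBytesPerCellLog2 == 2); the helper name is hypothetical, not V8
// API. Note that callers add MemoryChunk::kHeaderSize when dereferencing the
// resulting cell address, as JumpIfBlack and EnsureNotWhite do.
//
//   static void GetMarkBitsModel(uintptr_t addr,
//                                uintptr_t* cell, uint32_t* mask) {
//     uintptr_t page = addr & ~Page::kPageAlignmentMask;  // containing page
//     uintptr_t word = (addr & Page::kPageAlignmentMask) >> 3;  // word index
//     *cell = page + 4 * (word >> 5);  // byte address of the 32-bit cell
//     *mask = 1u << (word & 31);       // first mark bit within that cell
//   }
//
// The combined shift in the assembler (5 + 3 - 2 == 6) plus the mask with
// ~(Bitmap::kBytesPerCell - 1) computes 4 * (word >> 5) in a single step.

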
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (FLAG_debug_code) {
    // Check for the impossible bit pattern "01" by testing the second mark
    // bit; the first bit is known to be 0 at this point.
    Label ok;
    push(mask_scratch);
    // addq acts as shl by 1.  May overflow, making the check conservative.
    addq(mask_scratch, mask_scratch);
    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumbers and non-indirect strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap numbers.
  movq(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movq(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not an indirect (cons or sliced) string then
  // it's an object containing no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movq(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kAsciiStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  addq(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));

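  // Editor's note (illustration only): a worked example of the length
  // computation above, assuming x64 smi encoding (kSmiTagSize == 1,
  // kSmiShiftSize == 31, so a smi keeps its value in the upper 32 bits) and
  // 8-byte object alignment (kObjectAlignmentMask == 7). For a sequential
  // ASCII string of 10 characters:
  //
  //   encoding bits -> length == 4                  // char size << 2
  //   imul by the smi 10 (i.e. 10 << 32)            // length == 40 << 32
  //   shr by 2 + 1 + 31 == 34                       // length == 10 char bytes
  //   add SeqString::kHeaderSize + 7, and_ with ~7  // round up to alignment
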
  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Register empty_descriptor_array_value = r9;
  LoadRoot(empty_descriptor_array_value,
           Heap::kEmptyDescriptorArrayRootIndex);
  movq(rcx, rax);
  bind(&next);

  // Check that there are no elements.  Register rcx contains the
  // current JS object we've reached through the prototype chain.
  cmpq(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  // Check that instance descriptors are not empty so that we can
  // check for an enum cache.  Leave the map in rbx for the subsequent
  // prototype load.
  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
  movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
  JumpIfSmi(rdx, call_runtime);

  // Check that there is an enum cache in the non-empty instance
  // descriptors (rdx).  This is the case if the next enumeration
  // index field does not contain a smi.
  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
  JumpIfSmi(rdx, call_runtime);

  // For all objects but the receiver, check that the cache is empty.
  Label check_prototype;
  cmpq(rcx, rax);
  j(equal, &check_prototype, Label::kNear);
  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
  cmpq(rdx, empty_fixed_array_value);
  j(not_equal, call_runtime);

  // Load the prototype from the map and loop if non-null.
  bind(&check_prototype);
  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpq(rcx, null_value);
  j(not_equal, &next);
}


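// Editor's note (illustration only): a C++ sketch of the prototype-chain walk
// CheckEnumCache performs for the for-in fast path; method names are
// descriptive placeholders, not V8 API. The assembler version jumps to
// call_runtime on any failing check and falls through on success.
//
//   bool ChainHasOnlyValidEnumCaches(JSObject* receiver) {
//     for (JSObject* o = receiver; o != null; o = o->map()->prototype()) {
//       if (o->elements() != empty_fixed_array) return false;
//       if (o->map()->DescriptorsFieldIsSmi()) return false;
//       if (!o->instance_descriptors()->HasEnumCache()) return false;
//       // Every object except the receiver must have an empty enum cache.
//       if (o != receiver && o->enum_cache() != empty_fixed_array)
//         return false;
//     }
//     return true;
//   }

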
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64