      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #if V8_TARGET_ARCH_X64
      6 
      7 #include "src/base/bits.h"
      8 #include "src/base/division-by-constant.h"
      9 #include "src/bootstrapper.h"
     10 #include "src/codegen.h"
     11 #include "src/debug/debug.h"
     12 #include "src/heap/heap.h"
     13 #include "src/register-configuration.h"
     14 #include "src/x64/assembler-x64.h"
     15 #include "src/x64/macro-assembler-x64.h"
     16 
     17 namespace v8 {
     18 namespace internal {
     19 
     20 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
     21                                CodeObjectRequired create_code_object)
     22     : Assembler(arg_isolate, buffer, size),
     23       generating_stub_(false),
     24       has_frame_(false),
     25       root_array_available_(true) {
     26   if (create_code_object == CodeObjectRequired::kYes) {
     27     code_object_ =
     28         Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
     29   }
     30 }
     31 
     32 
     33 static const int64_t kInvalidRootRegisterDelta = -1;
     34 
     35 
     36 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
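          // Computes the displacement of |other| from the value kept in kRootRegister
          // (roots_array_start + kRootRegisterBias, see below). Callers use this delta to
          // address external references as Operand(kRootRegister, delta) whenever it fits
          // in an int32.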
     37   if (predictable_code_size() &&
     38       (other.address() < reinterpret_cast<Address>(isolate()) ||
     39        other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
     40     return kInvalidRootRegisterDelta;
     41   }
     42   Address roots_register_value = kRootRegisterBias +
     43       reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
     44 
     45   int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
     46   if (kPointerSize == kInt64Size) {
     47     delta = other.address() - roots_register_value;
     48   } else {
     49     // For x32, zero-extend the address to 64 bits and calculate the delta.
     50     uint64_t o = static_cast<uint32_t>(
     51         reinterpret_cast<intptr_t>(other.address()));
     52     uint64_t r = static_cast<uint32_t>(
     53         reinterpret_cast<intptr_t>(roots_register_value));
     54     delta = o - r;
     55   }
     56   return delta;
     57 }
     58 
     59 
     60 Operand MacroAssembler::ExternalOperand(ExternalReference target,
     61                                         Register scratch) {
     62   if (root_array_available_ && !serializer_enabled()) {
     63     int64_t delta = RootRegisterDelta(target);
     64     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
     65       return Operand(kRootRegister, static_cast<int32_t>(delta));
     66     }
     67   }
     68   Move(scratch, target);
     69   return Operand(scratch, 0);
     70 }
     71 
     72 
     73 void MacroAssembler::Load(Register destination, ExternalReference source) {
     74   if (root_array_available_ && !serializer_enabled()) {
     75     int64_t delta = RootRegisterDelta(source);
     76     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
     77       movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
     78       return;
     79     }
     80   }
     81   // Safe code.
     82   if (destination.is(rax)) {
     83     load_rax(source);
     84   } else {
     85     Move(kScratchRegister, source);
     86     movp(destination, Operand(kScratchRegister, 0));
     87   }
     88 }
     89 
     90 
     91 void MacroAssembler::Store(ExternalReference destination, Register source) {
     92   if (root_array_available_ && !serializer_enabled()) {
     93     int64_t delta = RootRegisterDelta(destination);
     94     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
     95       movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
     96       return;
     97     }
     98   }
     99   // Safe code.
    100   if (source.is(rax)) {
    101     store_rax(destination);
    102   } else {
    103     Move(kScratchRegister, destination);
    104     movp(Operand(kScratchRegister, 0), source);
    105   }
    106 }
    107 
    108 
    109 void MacroAssembler::LoadAddress(Register destination,
    110                                  ExternalReference source) {
    111   if (root_array_available_ && !serializer_enabled()) {
    112     int64_t delta = RootRegisterDelta(source);
    113     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
    114       leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
    115       return;
    116     }
    117   }
    118   // Safe code.
    119   Move(destination, source);
    120 }
    121 
    122 
    123 int MacroAssembler::LoadAddressSize(ExternalReference source) {
    124   if (root_array_available_ && !serializer_enabled()) {
    125     // This calculation depends on the internals of LoadAddress.
    126     // Its correctness is ensured by the asserts in the Call
    127     // instruction below.
    128     int64_t delta = RootRegisterDelta(source);
    129     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
    130       // Operand is leap(scratch, Operand(kRootRegister, delta));
    131       // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7.
    132       int size = 4;
    133       if (!is_int8(static_cast<int32_t>(delta))) {
    134         size += 3;  // Need full four-byte displacement in lea.
    135       }
    136       return size;
    137     }
    138   }
    139   // Size of movp(destination, src);
    140   return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
    141 }
    142 
    143 
    144 void MacroAssembler::PushAddress(ExternalReference source) {
    145   int64_t address = reinterpret_cast<int64_t>(source.address());
    146   if (is_int32(address) && !serializer_enabled()) {
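            // A 32-bit immediate push is sign-extended to 64 bits by the hardware, so
            // this fast path reproduces the address exactly whenever it fits in an int32
            // (checked above).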
    147     if (emit_debug_code()) {
    148       Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    149     }
    150     Push(Immediate(static_cast<int32_t>(address)));
    151     return;
    152   }
    153   LoadAddress(kScratchRegister, source);
    154   Push(kScratchRegister);
    155 }
    156 
    157 
    158 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
    159   DCHECK(root_array_available_);
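          // kRootRegister holds roots_array_start + kRootRegisterBias (see
          // RootRegisterDelta above), so the bias is subtracted when indexing the array.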
    160   movp(destination, Operand(kRootRegister,
    161                             (index << kPointerSizeLog2) - kRootRegisterBias));
    162 }
    163 
    164 
    165 void MacroAssembler::LoadRootIndexed(Register destination,
    166                                      Register variable_offset,
    167                                      int fixed_offset) {
    168   DCHECK(root_array_available_);
    169   movp(destination,
    170        Operand(kRootRegister,
    171                variable_offset, times_pointer_size,
    172                (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
    173 }
    174 
    175 
    176 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
    177   DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
    178   DCHECK(root_array_available_);
    179   movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
    180        source);
    181 }
    182 
    183 
    184 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
    185   DCHECK(root_array_available_);
    186   Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
    187 }
    188 
    189 
    190 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
    191   DCHECK(root_array_available_);
    192   cmpp(with, Operand(kRootRegister,
    193                      (index << kPointerSizeLog2) - kRootRegisterBias));
    194 }
    195 
    196 
    197 void MacroAssembler::CompareRoot(const Operand& with,
    198                                  Heap::RootListIndex index) {
    199   DCHECK(root_array_available_);
    200   DCHECK(!with.AddressUsesRegister(kScratchRegister));
    201   LoadRoot(kScratchRegister, index);
    202   cmpp(with, kScratchRegister);
    203 }
    204 
    205 
    206 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
    207                                          Register addr,
    208                                          Register scratch,
    209                                          SaveFPRegsMode save_fp,
    210                                          RememberedSetFinalAction and_then) {
    211   if (emit_debug_code()) {
    212     Label ok;
    213     JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    214     int3();
    215     bind(&ok);
    216   }
    217   // Load store buffer top.
    218   ExternalReference store_buffer =
    219       ExternalReference::store_buffer_top(isolate());
    220   movp(scratch, ExternalOperand(store_buffer));
    221   // Store pointer to buffer.
    222   movp(Operand(scratch, 0), addr);
    223   // Increment buffer top.
    224   addp(scratch, Immediate(kPointerSize));
    225   // Write back new top of buffer.
    226   movp(ExternalOperand(store_buffer), scratch);
    227   // Call stub on end of buffer.
    228   Label done;
    229   // Check for end of buffer.
    230   testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
    231   if (and_then == kReturnAtEnd) {
    232     Label buffer_overflowed;
    233     j(equal, &buffer_overflowed, Label::kNear);
    234     ret(0);
    235     bind(&buffer_overflowed);
    236   } else {
    237     DCHECK(and_then == kFallThroughAtEnd);
    238     j(not_equal, &done, Label::kNear);
    239   }
    240   StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
    241   CallStub(&store_buffer_overflow);
    242   if (and_then == kReturnAtEnd) {
    243     ret(0);
    244   } else {
    245     DCHECK(and_then == kFallThroughAtEnd);
    246     bind(&done);
    247   }
    248 }
    249 
    250 
    251 void MacroAssembler::InNewSpace(Register object,
    252                                 Register scratch,
    253                                 Condition cc,
    254                                 Label* branch,
    255                                 Label::Distance distance) {
    256   const int mask =
    257       (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
    258   CheckPageFlag(object, scratch, mask, cc, branch, distance);
    259 }
    260 
    261 
    262 void MacroAssembler::RecordWriteField(
    263     Register object,
    264     int offset,
    265     Register value,
    266     Register dst,
    267     SaveFPRegsMode save_fp,
    268     RememberedSetAction remembered_set_action,
    269     SmiCheck smi_check,
    270     PointersToHereCheck pointers_to_here_check_for_value) {
    271   // First, check if a write barrier is even needed. The tests below
    272   // catch stores of Smis.
    273   Label done;
    274 
    275   // Skip barrier if writing a smi.
    276   if (smi_check == INLINE_SMI_CHECK) {
    277     JumpIfSmi(value, &done);
    278   }
    279 
    280   // Although the object register is tagged, the offset is relative to the start
    281   // of the object, so the offset must be a multiple of kPointerSize.
    282   DCHECK(IsAligned(offset, kPointerSize));
    283 
    284   leap(dst, FieldOperand(object, offset));
    285   if (emit_debug_code()) {
    286     Label ok;
    287     testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    288     j(zero, &ok, Label::kNear);
    289     int3();
    290     bind(&ok);
    291   }
    292 
    293   RecordWrite(object, dst, value, save_fp, remembered_set_action,
    294               OMIT_SMI_CHECK, pointers_to_here_check_for_value);
    295 
    296   bind(&done);
    297 
    298   // Clobber clobbered input registers when running with the debug-code flag
    299   // turned on to provoke errors.
    300   if (emit_debug_code()) {
    301     Move(value, kZapValue, Assembler::RelocInfoNone());
    302     Move(dst, kZapValue, Assembler::RelocInfoNone());
    303   }
    304 }
    305 
    306 
    307 void MacroAssembler::RecordWriteArray(
    308     Register object,
    309     Register value,
    310     Register index,
    311     SaveFPRegsMode save_fp,
    312     RememberedSetAction remembered_set_action,
    313     SmiCheck smi_check,
    314     PointersToHereCheck pointers_to_here_check_for_value) {
    315   // First, check if a write barrier is even needed. The tests below
    316   // catch stores of Smis.
    317   Label done;
    318 
    319   // Skip barrier if writing a smi.
    320   if (smi_check == INLINE_SMI_CHECK) {
    321     JumpIfSmi(value, &done);
    322   }
    323 
    324   // Array access: calculate the destination address. Index is not a smi.
    325   Register dst = index;
    326   leap(dst, Operand(object, index, times_pointer_size,
    327                    FixedArray::kHeaderSize - kHeapObjectTag));
    328 
    329   RecordWrite(object, dst, value, save_fp, remembered_set_action,
    330               OMIT_SMI_CHECK, pointers_to_here_check_for_value);
    331 
    332   bind(&done);
    333 
    334   // Clobber clobbered input registers when running with the debug-code flag
    335   // turned on to provoke errors.
    336   if (emit_debug_code()) {
    337     Move(value, kZapValue, Assembler::RelocInfoNone());
    338     Move(index, kZapValue, Assembler::RelocInfoNone());
    339   }
    340 }
    341 
    342 
    343 void MacroAssembler::RecordWriteForMap(Register object,
    344                                        Register map,
    345                                        Register dst,
    346                                        SaveFPRegsMode fp_mode) {
    347   DCHECK(!object.is(kScratchRegister));
    348   DCHECK(!object.is(map));
    349   DCHECK(!object.is(dst));
    350   DCHECK(!map.is(dst));
    351   AssertNotSmi(object);
    352 
    353   if (emit_debug_code()) {
    354     Label ok;
    355     if (map.is(kScratchRegister)) pushq(map);
    356     CompareMap(map, isolate()->factory()->meta_map());
    357     if (map.is(kScratchRegister)) popq(map);
    358     j(equal, &ok, Label::kNear);
    359     int3();
    360     bind(&ok);
    361   }
    362 
    363   if (!FLAG_incremental_marking) {
    364     return;
    365   }
    366 
    367   if (emit_debug_code()) {
    368     Label ok;
    369     if (map.is(kScratchRegister)) pushq(map);
    370     cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
    371     if (map.is(kScratchRegister)) popq(map);
    372     j(equal, &ok, Label::kNear);
    373     int3();
    374     bind(&ok);
    375   }
    376 
    377   // Compute the address.
    378   leap(dst, FieldOperand(object, HeapObject::kMapOffset));
    379 
    380   // First, check if a write barrier is even needed. The tests below
    381   // catch stores of smis and stores into the young generation.
    382   Label done;
    383 
    384   // A single check of the map's page's interesting flag suffices, since that
    385   // flag is only set during incremental collection, and in that case the flag on
    386   // the page of the object being written from is guaranteed to be set as well.
    387   // This optimization relies on the fact that maps can never be in new space.
    388   CheckPageFlag(map,
    389                 map,  // Used as scratch.
    390                 MemoryChunk::kPointersToHereAreInterestingMask,
    391                 zero,
    392                 &done,
    393                 Label::kNear);
    394 
    395   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
    396                        fp_mode);
    397   CallStub(&stub);
    398 
    399   bind(&done);
    400 
    401   // Count number of write barriers in generated code.
    402   isolate()->counters()->write_barriers_static()->Increment();
    403   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
    404 
    405   // Clobber clobbered registers when running with the debug-code flag
    406   // turned on to provoke errors.
    407   if (emit_debug_code()) {
    408     Move(dst, kZapValue, Assembler::RelocInfoNone());
    409     Move(map, kZapValue, Assembler::RelocInfoNone());
    410   }
    411 }
    412 
    413 
    414 void MacroAssembler::RecordWrite(
    415     Register object,
    416     Register address,
    417     Register value,
    418     SaveFPRegsMode fp_mode,
    419     RememberedSetAction remembered_set_action,
    420     SmiCheck smi_check,
    421     PointersToHereCheck pointers_to_here_check_for_value) {
    422   DCHECK(!object.is(value));
    423   DCHECK(!object.is(address));
    424   DCHECK(!value.is(address));
    425   AssertNotSmi(object);
    426 
    427   if (remembered_set_action == OMIT_REMEMBERED_SET &&
    428       !FLAG_incremental_marking) {
    429     return;
    430   }
    431 
    432   if (emit_debug_code()) {
    433     Label ok;
    434     cmpp(value, Operand(address, 0));
    435     j(equal, &ok, Label::kNear);
    436     int3();
    437     bind(&ok);
    438   }
    439 
    440   // First, check if a write barrier is even needed. The tests below
    441   // catch stores of smis and stores into the young generation.
    442   Label done;
    443 
    444   if (smi_check == INLINE_SMI_CHECK) {
    445     // Skip barrier if writing a smi.
    446     JumpIfSmi(value, &done);
    447   }
    448 
    449   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    450     CheckPageFlag(value,
    451                   value,  // Used as scratch.
    452                   MemoryChunk::kPointersToHereAreInterestingMask,
    453                   zero,
    454                   &done,
    455                   Label::kNear);
    456   }
    457 
    458   CheckPageFlag(object,
    459                 value,  // Used as scratch.
    460                 MemoryChunk::kPointersFromHereAreInterestingMask,
    461                 zero,
    462                 &done,
    463                 Label::kNear);
    464 
    465   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
    466                        fp_mode);
    467   CallStub(&stub);
    468 
    469   bind(&done);
    470 
    471   // Count number of write barriers in generated code.
    472   isolate()->counters()->write_barriers_static()->Increment();
    473   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
    474 
    475   // Clobber clobbered registers when running with the debug-code flag
    476   // turned on to provoke errors.
    477   if (emit_debug_code()) {
    478     Move(address, kZapValue, Assembler::RelocInfoNone());
    479     Move(value, kZapValue, Assembler::RelocInfoNone());
    480   }
    481 }
    482 
    483 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
    484                                                Register code_entry,
    485                                                Register scratch) {
    486   const int offset = JSFunction::kCodeEntryOffset;
    487 
    488   // The input registers are fixed to make calling the C write barrier function
    489   // easier.
    490   DCHECK(js_function.is(rdi));
    491   DCHECK(code_entry.is(rcx));
    492   DCHECK(scratch.is(r15));
    493 
    494   // Since a code entry (value) is always in old space, we don't need to update
    495   // the remembered set. If incremental marking is off, there is nothing for us to
    496   // do.
    497   if (!FLAG_incremental_marking) return;
    498 
    499   AssertNotSmi(js_function);
    500 
    501   if (emit_debug_code()) {
    502     Label ok;
    503     leap(scratch, FieldOperand(js_function, offset));
    504     cmpp(code_entry, Operand(scratch, 0));
    505     j(equal, &ok, Label::kNear);
    506     int3();
    507     bind(&ok);
    508   }
    509 
    510   // First, check if a write barrier is even needed. The tests below
    511   // catch stores of Smis and stores into young gen.
    512   Label done;
    513 
    514   CheckPageFlag(code_entry, scratch,
    515                 MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
    516                 Label::kNear);
    517   CheckPageFlag(js_function, scratch,
    518                 MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
    519                 Label::kNear);
    520 
    521   // Save input registers.
    522   Push(js_function);
    523   Push(code_entry);
    524 
    525   const Register dst = scratch;
    526   leap(dst, FieldOperand(js_function, offset));
    527 
    528   // Save caller-saved registers.
    529   PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
    530 
    531   int argument_count = 3;
    532   PrepareCallCFunction(argument_count);
    533 
    534   // Load the argument registers.
    535   if (arg_reg_1.is(rcx)) {
    536     // Windows calling convention.
    537     DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
    538 
    539     movp(arg_reg_1, js_function);  // rcx gets rdi.
    540     movp(arg_reg_2, dst);          // rdx gets r15.
    541   } else {
    542     // AMD64 calling convention.
    543     DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
    544 
    545     // rdi is already loaded with js_function.
    546     movp(arg_reg_2, dst);  // rsi gets r15.
    547   }
    548   Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
    549 
    550   {
    551     AllowExternalCallThatCantCauseGC scope(this);
    552     CallCFunction(
    553         ExternalReference::incremental_marking_record_write_code_entry_function(
    554             isolate()),
    555         argument_count);
    556   }
    557 
    558   // Restore caller-saved registers.
    559   PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
    560 
    561   // Restore input registers.
    562   Pop(code_entry);
    563   Pop(js_function);
    564 
    565   bind(&done);
    566 }
    567 
    568 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
    569   if (emit_debug_code()) Check(cc, reason);
    570 }
    571 
    572 
    573 void MacroAssembler::AssertFastElements(Register elements) {
    574   if (emit_debug_code()) {
    575     Label ok;
    576     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
    577                 Heap::kFixedArrayMapRootIndex);
    578     j(equal, &ok, Label::kNear);
    579     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
    580                 Heap::kFixedDoubleArrayMapRootIndex);
    581     j(equal, &ok, Label::kNear);
    582     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
    583                 Heap::kFixedCOWArrayMapRootIndex);
    584     j(equal, &ok, Label::kNear);
    585     Abort(kJSObjectWithFastElementsMapHasSlowElements);
    586     bind(&ok);
    587   }
    588 }
    589 
    590 
    591 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
    592   Label L;
    593   j(cc, &L, Label::kNear);
    594   Abort(reason);
    595   // Control will not return here.
    596   bind(&L);
    597 }
    598 
    599 
    600 void MacroAssembler::CheckStackAlignment() {
    601   int frame_alignment = base::OS::ActivationFrameAlignment();
    602   int frame_alignment_mask = frame_alignment - 1;
    603   if (frame_alignment > kPointerSize) {
    604     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    605     Label alignment_as_expected;
    606     testp(rsp, Immediate(frame_alignment_mask));
    607     j(zero, &alignment_as_expected, Label::kNear);
    608     // Abort if stack is not aligned.
    609     int3();
    610     bind(&alignment_as_expected);
    611   }
    612 }
    613 
    614 
    615 void MacroAssembler::NegativeZeroTest(Register result,
    616                                       Register op,
    617                                       Label* then_label) {
    618   Label ok;
    619   testl(result, result);
    620   j(not_zero, &ok, Label::kNear);
    621   testl(op, op);
    622   j(sign, then_label);
    623   bind(&ok);
    624 }
    625 
    626 
    627 void MacroAssembler::Abort(BailoutReason reason) {
    628 #ifdef DEBUG
    629   const char* msg = GetBailoutReason(reason);
    630   if (msg != NULL) {
    631     RecordComment("Abort message: ");
    632     RecordComment(msg);
    633   }
    634 
    635   if (FLAG_trap_on_abort) {
    636     int3();
    637     return;
    638   }
    639 #endif
    640 
    641   Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
    642        Assembler::RelocInfoNone());
    643   Push(kScratchRegister);
    644 
    645   if (!has_frame_) {
    646     // We don't actually want to generate a pile of code for this, so just
    647     // claim there is a stack frame, without generating one.
    648     FrameScope scope(this, StackFrame::NONE);
    649     CallRuntime(Runtime::kAbort);
    650   } else {
    651     CallRuntime(Runtime::kAbort);
    652   }
    653   // Control will not return here.
    654   int3();
    655 }
    656 
    657 
    658 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
    659   DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
    660   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
    661 }
    662 
    663 
    664 void MacroAssembler::TailCallStub(CodeStub* stub) {
    665   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
    666 }
    667 
    668 
    669 void MacroAssembler::StubReturn(int argc) {
    670   DCHECK(argc >= 1 && generating_stub());
    671   ret((argc - 1) * kPointerSize);
    672 }
    673 
    674 
    675 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
    676   return has_frame_ || !stub->SometimesSetsUpAFrame();
    677 }
    678 
    679 
    680 void MacroAssembler::IndexFromHash(Register hash, Register index) {
    681   // The assert checks that the constants for the maximum number of digits
    682   // for an array index cached in the hash field and the number of bits
    683   // reserved for it do not conflict.
    684   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
    685          (1 << String::kArrayIndexValueBits));
    686   if (!hash.is(index)) {
    687     movl(index, hash);
    688   }
    689   DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
    690 }
    691 
    692 
    693 void MacroAssembler::CallRuntime(const Runtime::Function* f,
    694                                  int num_arguments,
    695                                  SaveFPRegsMode save_doubles) {
    696   // If the expected number of arguments of the runtime function is
    697   // constant, we check that the actual number of arguments matches the
    698   // expectation.
    699   CHECK(f->nargs < 0 || f->nargs == num_arguments);
    700 
    701   // TODO(1236192): Most runtime routines don't need the number of
    702   // arguments passed in because it is constant. At some point we
    703   // should remove this need and make the runtime routine entry code
    704   // smarter.
    705   Set(rax, num_arguments);
    706   LoadAddress(rbx, ExternalReference(f, isolate()));
    707   CEntryStub ces(isolate(), f->result_size, save_doubles);
    708   CallStub(&ces);
    709 }
    710 
    711 
    712 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
    713                                            int num_arguments) {
    714   Set(rax, num_arguments);
    715   LoadAddress(rbx, ext);
    716 
    717   CEntryStub stub(isolate(), 1);
    718   CallStub(&stub);
    719 }
    720 
    721 
    722 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
    723   // ----------- S t a t e -------------
    724   //  -- rsp[0]                 : return address
    725   //  -- rsp[8]                 : argument num_arguments - 1
    726   //  ...
    727   //  -- rsp[8 * num_arguments] : argument 0 (receiver)
    728   //
    729   //  For runtime functions with variable arguments:
    730   //  -- rax                    : number of arguments
    731   // -----------------------------------
    732 
    733   const Runtime::Function* function = Runtime::FunctionForId(fid);
    734   DCHECK_EQ(1, function->result_size);
    735   if (function->nargs >= 0) {
    736     Set(rax, function->nargs);
    737   }
    738   JumpToExternalReference(ExternalReference(fid, isolate()));
    739 }
    740 
    741 
    742 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
    743   // Set the entry point and jump to the C entry runtime stub.
    744   LoadAddress(rbx, ext);
    745   CEntryStub ces(isolate(), 1);
    746   jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
    747 }
    748 
    749 
    750 #define REG(Name) \
    751   { Register::kCode_##Name }
    752 
    753 static const Register saved_regs[] = {
    754   REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
    755   REG(r9), REG(r10), REG(r11)
    756 };
    757 
    758 #undef REG
    759 
    760 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
    761 
    762 
    763 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
    764                                      Register exclusion1,
    765                                      Register exclusion2,
    766                                      Register exclusion3) {
    767   // We don't allow a GC during a store buffer overflow so there is no need to
    768   // store the registers in any particular way, but we do have to store and
    769   // restore them.
    770   for (int i = 0; i < kNumberOfSavedRegs; i++) {
    771     Register reg = saved_regs[i];
    772     if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
    773       pushq(reg);
    774     }
    775   }
    776   // r12 to r15 are callee-saved on all platforms.
    777   if (fp_mode == kSaveFPRegs) {
    778     subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    779     for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
    780       XMMRegister reg = XMMRegister::from_code(i);
    781       Movsd(Operand(rsp, i * kDoubleSize), reg);
    782     }
    783   }
    784 }
    785 
    786 
    787 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
    788                                     Register exclusion1,
    789                                     Register exclusion2,
    790                                     Register exclusion3) {
    791   if (fp_mode == kSaveFPRegs) {
    792     for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
    793       XMMRegister reg = XMMRegister::from_code(i);
    794       Movsd(reg, Operand(rsp, i * kDoubleSize));
    795     }
    796     addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    797   }
    798   for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    799     Register reg = saved_regs[i];
    800     if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
    801       popq(reg);
    802     }
    803   }
    804 }
    805 
    806 
    807 void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
    808   if (CpuFeatures::IsSupported(AVX)) {
    809     CpuFeatureScope scope(this, AVX);
    810     vcvtss2sd(dst, src, src);
    811   } else {
    812     cvtss2sd(dst, src);
    813   }
    814 }
    815 
    816 
    817 void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
    818   if (CpuFeatures::IsSupported(AVX)) {
    819     CpuFeatureScope scope(this, AVX);
    820     vcvtss2sd(dst, dst, src);
    821   } else {
    822     cvtss2sd(dst, src);
    823   }
    824 }
    825 
    826 
    827 void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
    828   if (CpuFeatures::IsSupported(AVX)) {
    829     CpuFeatureScope scope(this, AVX);
    830     vcvtsd2ss(dst, src, src);
    831   } else {
    832     cvtsd2ss(dst, src);
    833   }
    834 }
    835 
    836 
    837 void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
    838   if (CpuFeatures::IsSupported(AVX)) {
    839     CpuFeatureScope scope(this, AVX);
    840     vcvtsd2ss(dst, dst, src);
    841   } else {
    842     cvtsd2ss(dst, src);
    843   }
    844 }
    845 
    846 
    847 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
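          // cvtsi2sd writes only the low 64 bits of dst; zeroing the register first
          // breaks the false dependency on its previous contents.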
    848   if (CpuFeatures::IsSupported(AVX)) {
    849     CpuFeatureScope scope(this, AVX);
    850     vxorpd(dst, dst, dst);
    851     vcvtlsi2sd(dst, dst, src);
    852   } else {
    853     xorpd(dst, dst);
    854     cvtlsi2sd(dst, src);
    855   }
    856 }
    857 
    858 
    859 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
    860   if (CpuFeatures::IsSupported(AVX)) {
    861     CpuFeatureScope scope(this, AVX);
    862     vxorpd(dst, dst, dst);
    863     vcvtlsi2sd(dst, dst, src);
    864   } else {
    865     xorpd(dst, dst);
    866     cvtlsi2sd(dst, src);
    867   }
    868 }
    869 
    870 
    871 void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
    872   if (CpuFeatures::IsSupported(AVX)) {
    873     CpuFeatureScope scope(this, AVX);
    874     vxorps(dst, dst, dst);
    875     vcvtlsi2ss(dst, dst, src);
    876   } else {
    877     xorps(dst, dst);
    878     cvtlsi2ss(dst, src);
    879   }
    880 }
    881 
    882 
    883 void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
    884   if (CpuFeatures::IsSupported(AVX)) {
    885     CpuFeatureScope scope(this, AVX);
    886     vxorps(dst, dst, dst);
    887     vcvtlsi2ss(dst, dst, src);
    888   } else {
    889     xorps(dst, dst);
    890     cvtlsi2ss(dst, src);
    891   }
    892 }
    893 
    894 
    895 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
    896   if (CpuFeatures::IsSupported(AVX)) {
    897     CpuFeatureScope scope(this, AVX);
    898     vxorps(dst, dst, dst);
    899     vcvtqsi2ss(dst, dst, src);
    900   } else {
    901     xorps(dst, dst);
    902     cvtqsi2ss(dst, src);
    903   }
    904 }
    905 
    906 
    907 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
    908   if (CpuFeatures::IsSupported(AVX)) {
    909     CpuFeatureScope scope(this, AVX);
    910     vxorps(dst, dst, dst);
    911     vcvtqsi2ss(dst, dst, src);
    912   } else {
    913     xorps(dst, dst);
    914     cvtqsi2ss(dst, src);
    915   }
    916 }
    917 
    918 
    919 void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
    920   if (CpuFeatures::IsSupported(AVX)) {
    921     CpuFeatureScope scope(this, AVX);
    922     vxorpd(dst, dst, dst);
    923     vcvtqsi2sd(dst, dst, src);
    924   } else {
    925     xorpd(dst, dst);
    926     cvtqsi2sd(dst, src);
    927   }
    928 }
    929 
    930 
    931 void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
    932   if (CpuFeatures::IsSupported(AVX)) {
    933     CpuFeatureScope scope(this, AVX);
    934     vxorpd(dst, dst, dst);
    935     vcvtqsi2sd(dst, dst, src);
    936   } else {
    937     xorpd(dst, dst);
    938     cvtqsi2sd(dst, src);
    939   }
    940 }
    941 
    942 
    943 void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
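          // cvtqsi2ss treats its source as signed, so a value with the top bit set cannot
          // be converted directly. In that case convert value / 2, folding the dropped low
          // bit back in so rounding still sees it, and then double the result.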
    944   Label msb_set_src;
    945   Label jmp_return;
    946   testq(src, src);
    947   j(sign, &msb_set_src, Label::kNear);
    948   Cvtqsi2ss(dst, src);
    949   jmp(&jmp_return, Label::kNear);
    950   bind(&msb_set_src);
    951   movq(tmp, src);
    952   shrq(src, Immediate(1));
    953   // Recover the least significant bit to avoid rounding errors.
    954   andq(tmp, Immediate(1));
    955   orq(src, tmp);
    956   Cvtqsi2ss(dst, src);
    957   addss(dst, dst);
    958   bind(&jmp_return);
    959 }
    960 
    961 
    962 void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
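          // Same trick as Cvtqui2ss above: halve values that have the top bit set (OR-ing
          // the dropped bit back in for correct rounding), convert, then double.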
    963   Label msb_set_src;
    964   Label jmp_return;
    965   testq(src, src);
    966   j(sign, &msb_set_src, Label::kNear);
    967   Cvtqsi2sd(dst, src);
    968   jmp(&jmp_return, Label::kNear);
    969   bind(&msb_set_src);
    970   movq(tmp, src);
    971   shrq(src, Immediate(1));
    972   andq(tmp, Immediate(1));
    973   orq(src, tmp);
    974   Cvtqsi2sd(dst, src);
    975   addsd(dst, dst);
    976   bind(&jmp_return);
    977 }
    978 
    979 
    980 void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
    981   if (CpuFeatures::IsSupported(AVX)) {
    982     CpuFeatureScope scope(this, AVX);
    983     vcvtsd2si(dst, src);
    984   } else {
    985     cvtsd2si(dst, src);
    986   }
    987 }
    988 
    989 
    990 void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
    991   if (CpuFeatures::IsSupported(AVX)) {
    992     CpuFeatureScope scope(this, AVX);
    993     vcvttss2si(dst, src);
    994   } else {
    995     cvttss2si(dst, src);
    996   }
    997 }
    998 
    999 
   1000 void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
   1001   if (CpuFeatures::IsSupported(AVX)) {
   1002     CpuFeatureScope scope(this, AVX);
   1003     vcvttss2si(dst, src);
   1004   } else {
   1005     cvttss2si(dst, src);
   1006   }
   1007 }
   1008 
   1009 
   1010 void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
   1011   if (CpuFeatures::IsSupported(AVX)) {
   1012     CpuFeatureScope scope(this, AVX);
   1013     vcvttsd2si(dst, src);
   1014   } else {
   1015     cvttsd2si(dst, src);
   1016   }
   1017 }
   1018 
   1019 
   1020 void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
   1021   if (CpuFeatures::IsSupported(AVX)) {
   1022     CpuFeatureScope scope(this, AVX);
   1023     vcvttsd2si(dst, src);
   1024   } else {
   1025     cvttsd2si(dst, src);
   1026   }
   1027 }
   1028 
   1029 
   1030 void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
   1031   if (CpuFeatures::IsSupported(AVX)) {
   1032     CpuFeatureScope scope(this, AVX);
   1033     vcvttss2siq(dst, src);
   1034   } else {
   1035     cvttss2siq(dst, src);
   1036   }
   1037 }
   1038 
   1039 
   1040 void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
   1041   if (CpuFeatures::IsSupported(AVX)) {
   1042     CpuFeatureScope scope(this, AVX);
   1043     vcvttss2siq(dst, src);
   1044   } else {
   1045     cvttss2siq(dst, src);
   1046   }
   1047 }
   1048 
   1049 
   1050 void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
   1051   if (CpuFeatures::IsSupported(AVX)) {
   1052     CpuFeatureScope scope(this, AVX);
   1053     vcvttsd2siq(dst, src);
   1054   } else {
   1055     cvttsd2siq(dst, src);
   1056   }
   1057 }
   1058 
   1059 
   1060 void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
   1061   if (CpuFeatures::IsSupported(AVX)) {
   1062     CpuFeatureScope scope(this, AVX);
   1063     vcvttsd2siq(dst, src);
   1064   } else {
   1065     cvttsd2siq(dst, src);
   1066   }
   1067 }
   1068 
   1069 
   1070 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
   1071   DCHECK(!r.IsDouble());
   1072   if (r.IsInteger8()) {
   1073     movsxbq(dst, src);
   1074   } else if (r.IsUInteger8()) {
   1075     movzxbl(dst, src);
   1076   } else if (r.IsInteger16()) {
   1077     movsxwq(dst, src);
   1078   } else if (r.IsUInteger16()) {
   1079     movzxwl(dst, src);
   1080   } else if (r.IsInteger32()) {
   1081     movl(dst, src);
   1082   } else {
   1083     movp(dst, src);
   1084   }
   1085 }
   1086 
   1087 
   1088 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
   1089   DCHECK(!r.IsDouble());
   1090   if (r.IsInteger8() || r.IsUInteger8()) {
   1091     movb(dst, src);
   1092   } else if (r.IsInteger16() || r.IsUInteger16()) {
   1093     movw(dst, src);
   1094   } else if (r.IsInteger32()) {
   1095     movl(dst, src);
   1096   } else {
   1097     if (r.IsHeapObject()) {
   1098       AssertNotSmi(src);
   1099     } else if (r.IsSmi()) {
   1100       AssertSmi(src);
   1101     }
   1102     movp(dst, src);
   1103   }
   1104 }
   1105 
   1106 
   1107 void MacroAssembler::Set(Register dst, int64_t x) {
   1108   if (x == 0) {
   1109     xorl(dst, dst);
   1110   } else if (is_uint32(x)) {
   1111     movl(dst, Immediate(static_cast<uint32_t>(x)));
   1112   } else if (is_int32(x)) {
   1113     movq(dst, Immediate(static_cast<int32_t>(x)));
   1114   } else {
   1115     movq(dst, x);
   1116   }
   1117 }
   1118 
   1119 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
   1120   if (kPointerSize == kInt64Size) {
   1121     if (is_int32(x)) {
   1122       movp(dst, Immediate(static_cast<int32_t>(x)));
   1123     } else {
   1124       Set(kScratchRegister, x);
   1125       movp(dst, kScratchRegister);
   1126     }
   1127   } else {
   1128     movp(dst, Immediate(static_cast<int32_t>(x)));
   1129   }
   1130 }
   1131 
   1132 
   1133 // ----------------------------------------------------------------------------
   1134 // Smi tagging, untagging and tag detection.
   1135 
   1136 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
   1137   static const int kMaxBits = 17;
   1138   return !is_intn(x, kMaxBits);
   1139 }
   1140 
   1141 
   1142 void MacroAssembler::SafeMove(Register dst, Smi* src) {
   1143   DCHECK(!dst.is(kScratchRegister));
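          // Values with too many significant bits are XOR-ed with the JIT cookie so the
          // raw constant never appears in generated code (a defense against JIT spraying);
          // the matching XOR emitted below restores the value at run time.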
   1144   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
   1145     if (SmiValuesAre32Bits()) {
   1146       // JIT cookie can be converted to Smi.
   1147       Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
   1148       Move(kScratchRegister, Smi::FromInt(jit_cookie()));
   1149       xorp(dst, kScratchRegister);
   1150     } else {
   1151       DCHECK(SmiValuesAre31Bits());
   1152       int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
   1153       movp(dst, Immediate(value ^ jit_cookie()));
   1154       xorp(dst, Immediate(jit_cookie()));
   1155     }
   1156   } else {
   1157     Move(dst, src);
   1158   }
   1159 }
   1160 
   1161 
   1162 void MacroAssembler::SafePush(Smi* src) {
   1163   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
   1164     if (SmiValuesAre32Bits()) {
   1165       // JIT cookie can be converted to Smi.
   1166       Push(Smi::FromInt(src->value() ^ jit_cookie()));
   1167       Move(kScratchRegister, Smi::FromInt(jit_cookie()));
   1168       xorp(Operand(rsp, 0), kScratchRegister);
   1169     } else {
   1170       DCHECK(SmiValuesAre31Bits());
   1171       int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
   1172       Push(Immediate(value ^ jit_cookie()));
   1173       xorp(Operand(rsp, 0), Immediate(jit_cookie()));
   1174     }
   1175   } else {
   1176     Push(src);
   1177   }
   1178 }
   1179 
   1180 
   1181 Register MacroAssembler::GetSmiConstant(Smi* source) {
   1182   STATIC_ASSERT(kSmiTag == 0);
   1183   int value = source->value();
   1184   if (value == 0) {
   1185     xorl(kScratchRegister, kScratchRegister);
   1186     return kScratchRegister;
   1187   }
   1188   LoadSmiConstant(kScratchRegister, source);
   1189   return kScratchRegister;
   1190 }
   1191 
   1192 
   1193 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
   1194   STATIC_ASSERT(kSmiTag == 0);
   1195   int value = source->value();
   1196   if (value == 0) {
   1197     xorl(dst, dst);
   1198   } else {
   1199     Move(dst, source, Assembler::RelocInfoNone());
   1200   }
   1201 }
   1202 
   1203 
   1204 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   1205   STATIC_ASSERT(kSmiTag == 0);
   1206   if (!dst.is(src)) {
   1207     movl(dst, src);
   1208   }
   1209   shlp(dst, Immediate(kSmiShift));
   1210 }
   1211 
   1212 
   1213 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   1214   if (emit_debug_code()) {
   1215     testb(dst, Immediate(0x01));
   1216     Label ok;
   1217     j(zero, &ok, Label::kNear);
   1218     Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
   1219     bind(&ok);
   1220   }
   1221 
   1222   if (SmiValuesAre32Bits()) {
   1223     DCHECK(kSmiShift % kBitsPerByte == 0);
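            // With 32-bit smi values the payload lives in the upper half of the field and
            // the lower half is zero, so the integer can be written directly at byte
            // offset kSmiShift / kBitsPerByte without shifting.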
   1224     movl(Operand(dst, kSmiShift / kBitsPerByte), src);
   1225   } else {
   1226     DCHECK(SmiValuesAre31Bits());
   1227     Integer32ToSmi(kScratchRegister, src);
   1228     movp(dst, kScratchRegister);
   1229   }
   1230 }
   1231 
   1232 
   1233 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
   1234                                                 Register src,
   1235                                                 int constant) {
   1236   if (dst.is(src)) {
   1237     addl(dst, Immediate(constant));
   1238   } else {
   1239     leal(dst, Operand(src, constant));
   1240   }
   1241   shlp(dst, Immediate(kSmiShift));
   1242 }
   1243 
   1244 
   1245 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
   1246   STATIC_ASSERT(kSmiTag == 0);
   1247   if (!dst.is(src)) {
   1248     movp(dst, src);
   1249   }
   1250 
   1251   if (SmiValuesAre32Bits()) {
   1252     shrp(dst, Immediate(kSmiShift));
   1253   } else {
   1254     DCHECK(SmiValuesAre31Bits());
   1255     sarl(dst, Immediate(kSmiShift));
   1256   }
   1257 }
   1258 
   1259 
   1260 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
   1261   if (SmiValuesAre32Bits()) {
   1262     movl(dst, Operand(src, kSmiShift / kBitsPerByte));
   1263   } else {
   1264     DCHECK(SmiValuesAre31Bits());
   1265     movl(dst, src);
   1266     sarl(dst, Immediate(kSmiShift));
   1267   }
   1268 }
   1269 
   1270 
   1271 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
   1272   STATIC_ASSERT(kSmiTag == 0);
   1273   if (!dst.is(src)) {
   1274     movp(dst, src);
   1275   }
   1276   sarp(dst, Immediate(kSmiShift));
   1277   if (kPointerSize == kInt32Size) {
   1278     // Sign extend to 64-bit.
   1279     movsxlq(dst, dst);
   1280   }
   1281 }
   1282 
   1283 
   1284 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
   1285   if (SmiValuesAre32Bits()) {
   1286     movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
   1287   } else {
   1288     DCHECK(SmiValuesAre31Bits());
   1289     movp(dst, src);
   1290     SmiToInteger64(dst, dst);
   1291   }
   1292 }
   1293 
   1294 
   1295 void MacroAssembler::SmiTest(Register src) {
   1296   AssertSmi(src);
   1297   testp(src, src);
   1298 }
   1299 
   1300 
   1301 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
   1302   AssertSmi(smi1);
   1303   AssertSmi(smi2);
   1304   cmpp(smi1, smi2);
   1305 }
   1306 
   1307 
   1308 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
   1309   AssertSmi(dst);
   1310   Cmp(dst, src);
   1311 }
   1312 
   1313 
   1314 void MacroAssembler::Cmp(Register dst, Smi* src) {
   1315   DCHECK(!dst.is(kScratchRegister));
   1316   if (src->value() == 0) {
   1317     testp(dst, dst);
   1318   } else {
   1319     Register constant_reg = GetSmiConstant(src);
   1320     cmpp(dst, constant_reg);
   1321   }
   1322 }
   1323 
   1324 
   1325 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
   1326   AssertSmi(dst);
   1327   AssertSmi(src);
   1328   cmpp(dst, src);
   1329 }
   1330 
   1331 
   1332 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
   1333   AssertSmi(dst);
   1334   AssertSmi(src);
   1335   cmpp(dst, src);
   1336 }
   1337 
   1338 
   1339 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
   1340   AssertSmi(dst);
   1341   if (SmiValuesAre32Bits()) {
   1342     cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
   1343   } else {
   1344     DCHECK(SmiValuesAre31Bits());
   1345     cmpl(dst, Immediate(src));
   1346   }
   1347 }
   1348 
   1349 
   1350 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
   1351   // The Operand must not use the register that will hold the smi constant.
   1352   Register smi_reg = GetSmiConstant(src);
   1353   DCHECK(!dst.AddressUsesRegister(smi_reg));
   1354   cmpp(dst, smi_reg);
   1355 }
   1356 
   1357 
   1358 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
   1359   if (SmiValuesAre32Bits()) {
   1360     cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
   1361   } else {
   1362     DCHECK(SmiValuesAre31Bits());
   1363     SmiToInteger32(kScratchRegister, dst);
   1364     cmpl(kScratchRegister, src);
   1365   }
   1366 }
   1367 
   1368 
   1369 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
   1370                                                            Register src,
   1371                                                            int power) {
   1372   DCHECK(power >= 0);
   1373   DCHECK(power < 64);
   1374   if (power == 0) {
   1375     SmiToInteger64(dst, src);
   1376     return;
   1377   }
   1378   if (!dst.is(src)) {
   1379     movp(dst, src);
   1380   }
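          // Untagging (a shift right by kSmiShift) and multiplying by 2^power (a shift
          // left by power) fold into a single shift by the difference.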
   1381   if (power < kSmiShift) {
   1382     sarp(dst, Immediate(kSmiShift - power));
   1383   } else if (power > kSmiShift) {
   1384     shlp(dst, Immediate(power - kSmiShift));
   1385   }
   1386 }
   1387 
   1388 
   1389 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
   1390                                                          Register src,
   1391                                                          int power) {
   1392   DCHECK((0 <= power) && (power < 32));
   1393   if (dst.is(src)) {
   1394     shrp(dst, Immediate(power + kSmiShift));
   1395   } else {
   1396     UNIMPLEMENTED();  // Not used.
   1397   }
   1398 }
   1399 
   1400 
   1401 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
   1402                                  Label* on_not_smis,
   1403                                  Label::Distance near_jump) {
   1404   if (dst.is(src1) || dst.is(src2)) {
   1405     DCHECK(!src1.is(kScratchRegister));
   1406     DCHECK(!src2.is(kScratchRegister));
   1407     movp(kScratchRegister, src1);
   1408     orp(kScratchRegister, src2);
   1409     JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
   1410     movp(dst, kScratchRegister);
   1411   } else {
   1412     movp(dst, src1);
   1413     orp(dst, src2);
   1414     JumpIfNotSmi(dst, on_not_smis, near_jump);
   1415   }
   1416 }
   1417 
   1418 
   1419 Condition MacroAssembler::CheckSmi(Register src) {
   1420   STATIC_ASSERT(kSmiTag == 0);
   1421   testb(src, Immediate(kSmiTagMask));
   1422   return zero;
   1423 }
   1424 
   1425 
   1426 Condition MacroAssembler::CheckSmi(const Operand& src) {
   1427   STATIC_ASSERT(kSmiTag == 0);
   1428   testb(src, Immediate(kSmiTagMask));
   1429   return zero;
   1430 }
   1431 
   1432 
   1433 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
   1434   STATIC_ASSERT(kSmiTag == 0);
   1435   // Test that both bits of the mask 0x8000000000000001 are zero.
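          // Rotating left by one moves the sign bit into bit 0, next to the smi tag bit
          // (now in bit 1), so a single testb against 3 checks both at once.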
   1436   movp(kScratchRegister, src);
   1437   rolp(kScratchRegister, Immediate(1));
   1438   testb(kScratchRegister, Immediate(3));
   1439   return zero;
   1440 }
   1441 
   1442 
   1443 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
   1444   if (first.is(second)) {
   1445     return CheckSmi(first);
   1446   }
   1447   STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
   1448   if (SmiValuesAre32Bits()) {
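            // With 32-bit smi values the low 32 bits of a smi are zero, so the low two
            // bits of first + second are just the two heap-object tag bits added together;
            // testing them against 0x03 checks both registers at once.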
   1449     leal(kScratchRegister, Operand(first, second, times_1, 0));
   1450     testb(kScratchRegister, Immediate(0x03));
   1451   } else {
   1452     DCHECK(SmiValuesAre31Bits());
   1453     movl(kScratchRegister, first);
   1454     orl(kScratchRegister, second);
   1455     testb(kScratchRegister, Immediate(kSmiTagMask));
   1456   }
   1457   return zero;
   1458 }
   1459 
   1460 
   1461 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
   1462                                                   Register second) {
   1463   if (first.is(second)) {
   1464     return CheckNonNegativeSmi(first);
   1465   }
   1466   movp(kScratchRegister, first);
   1467   orp(kScratchRegister, second);
   1468   rolp(kScratchRegister, Immediate(1));
   1469   testl(kScratchRegister, Immediate(3));
   1470   return zero;
   1471 }
   1472 
   1473 
   1474 Condition MacroAssembler::CheckEitherSmi(Register first,
   1475                                          Register second,
   1476                                          Register scratch) {
   1477   if (first.is(second)) {
   1478     return CheckSmi(first);
   1479   }
   1480   if (scratch.is(second)) {
   1481     andl(scratch, first);
   1482   } else {
   1483     if (!scratch.is(first)) {
   1484       movl(scratch, first);
   1485     }
   1486     andl(scratch, second);
   1487   }
   1488   testb(scratch, Immediate(kSmiTagMask));
   1489   return zero;
   1490 }
   1491 
   1492 
   1493 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
   1494   if (SmiValuesAre32Bits()) {
   1495     // A 32-bit integer value can always be converted to a smi.
   1496     return always;
   1497   } else {
   1498     DCHECK(SmiValuesAre31Bits());
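            // Subtracting 0xc0000000 leaves the sign flag clear exactly for values in
            // [-2^30, 2^30), the range representable as a 31-bit smi.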
   1499     cmpl(src, Immediate(0xc0000000));
   1500     return positive;
   1501   }
   1502 }
   1503 
   1504 
   1505 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
   1506   if (SmiValuesAre32Bits()) {
   1507     // An unsigned 32-bit integer value is valid as long as the high bit
   1508     // is not set.
   1509     testl(src, src);
   1510     return positive;
   1511   } else {
   1512     DCHECK(SmiValuesAre31Bits());
   1513     testl(src, Immediate(0xc0000000));
   1514     return zero;
   1515   }
   1516 }
   1517 
   1518 
   1519 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
   1520   if (dst.is(src)) {
   1521     andl(dst, Immediate(kSmiTagMask));
   1522   } else {
   1523     movl(dst, Immediate(kSmiTagMask));
   1524     andl(dst, src);
   1525   }
   1526 }
   1527 
   1528 
   1529 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
   1530   if (!(src.AddressUsesRegister(dst))) {
   1531     movl(dst, Immediate(kSmiTagMask));
   1532     andl(dst, src);
   1533   } else {
   1534     movl(dst, src);
   1535     andl(dst, Immediate(kSmiTagMask));
   1536   }
   1537 }
   1538 
   1539 
   1540 void MacroAssembler::JumpIfValidSmiValue(Register src,
   1541                                          Label* on_valid,
   1542                                          Label::Distance near_jump) {
   1543   Condition is_valid = CheckInteger32ValidSmiValue(src);
   1544   j(is_valid, on_valid, near_jump);
   1545 }
   1546 
   1547 
   1548 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
   1549                                             Label* on_invalid,
   1550                                             Label::Distance near_jump) {
   1551   Condition is_valid = CheckInteger32ValidSmiValue(src);
   1552   j(NegateCondition(is_valid), on_invalid, near_jump);
   1553 }
   1554 
   1555 
   1556 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
   1557                                              Label* on_valid,
   1558                                              Label::Distance near_jump) {
   1559   Condition is_valid = CheckUInteger32ValidSmiValue(src);
   1560   j(is_valid, on_valid, near_jump);
   1561 }
   1562 
   1563 
   1564 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
   1565                                                 Label* on_invalid,
   1566                                                 Label::Distance near_jump) {
   1567   Condition is_valid = CheckUInteger32ValidSmiValue(src);
   1568   j(NegateCondition(is_valid), on_invalid, near_jump);
   1569 }
   1570 
   1571 
   1572 void MacroAssembler::JumpIfSmi(Register src,
   1573                                Label* on_smi,
   1574                                Label::Distance near_jump) {
   1575   Condition smi = CheckSmi(src);
   1576   j(smi, on_smi, near_jump);
   1577 }
   1578 
   1579 
   1580 void MacroAssembler::JumpIfNotSmi(Register src,
   1581                                   Label* on_not_smi,
   1582                                   Label::Distance near_jump) {
   1583   Condition smi = CheckSmi(src);
   1584   j(NegateCondition(smi), on_not_smi, near_jump);
   1585 }
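         // Typical use of the smi dispatch helpers above (a sketch; the label
         // name and register are illustrative only):
         //
         //   Label not_smi;
         //   JumpIfNotSmi(rax, &not_smi, Label::kNear);
         //   // ... fast path for a smi in rax ...
         //   bind(&not_smi);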
   1586 
   1587 
   1588 void MacroAssembler::JumpUnlessNonNegativeSmi(
   1589     Register src, Label* on_not_smi_or_negative,
   1590     Label::Distance near_jump) {
   1591   Condition non_negative_smi = CheckNonNegativeSmi(src);
   1592   j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
   1593 }
   1594 
   1595 
   1596 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
   1597                                              Smi* constant,
   1598                                              Label* on_equals,
   1599                                              Label::Distance near_jump) {
   1600   SmiCompare(src, constant);
   1601   j(equal, on_equals, near_jump);
   1602 }
   1603 
   1604 
   1605 void MacroAssembler::JumpIfNotBothSmi(Register src1,
   1606                                       Register src2,
   1607                                       Label* on_not_both_smi,
   1608                                       Label::Distance near_jump) {
   1609   Condition both_smi = CheckBothSmi(src1, src2);
   1610   j(NegateCondition(both_smi), on_not_both_smi, near_jump);
   1611 }
   1612 
   1613 
   1614 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
   1615                                                   Register src2,
   1616                                                   Label* on_not_both_smi,
   1617                                                   Label::Distance near_jump) {
   1618   Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
   1619   j(NegateCondition(both_smi), on_not_both_smi, near_jump);
   1620 }
   1621 
   1622 
   1623 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   1624   if (constant->value() == 0) {
   1625     if (!dst.is(src)) {
   1626       movp(dst, src);
   1627     }
   1628     return;
   1629   } else if (dst.is(src)) {
   1630     DCHECK(!dst.is(kScratchRegister));
   1631     Register constant_reg = GetSmiConstant(constant);
   1632     addp(dst, constant_reg);
   1633   } else {
   1634     LoadSmiConstant(dst, constant);
   1635     addp(dst, src);
   1636   }
   1637 }
   1638 
   1639 
   1640 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   1641   if (constant->value() != 0) {
   1642     if (SmiValuesAre32Bits()) {
   1643       addl(Operand(dst, kSmiShift / kBitsPerByte),
   1644            Immediate(constant->value()));
   1645     } else {
   1646       DCHECK(SmiValuesAre31Bits());
   1647       addp(dst, Immediate(constant));
   1648     }
   1649   }
   1650 }
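         // Note on the 32-bit-smi path above: the smi payload occupies the upper
         // 32 bits of the word (kSmiShift == 32), so addl on the dword at byte
         // offset kSmiShift / kBitsPerByte (== 4) adds straight to the payload
         // and leaves the low tag bits untouched, e.g. turning v << 32 into
         // (v + 1) << 32 when adding Smi::FromInt(1).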
   1651 
   1652 
   1653 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
   1654                                     SmiOperationConstraints constraints,
   1655                                     Label* bailout_label,
   1656                                     Label::Distance near_jump) {
   1657   if (constant->value() == 0) {
   1658     if (!dst.is(src)) {
   1659       movp(dst, src);
   1660     }
   1661   } else if (dst.is(src)) {
   1662     DCHECK(!dst.is(kScratchRegister));
   1663     LoadSmiConstant(kScratchRegister, constant);
   1664     addp(dst, kScratchRegister);
   1665     if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
   1666       j(no_overflow, bailout_label, near_jump);
   1667       DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
   1668       subp(dst, kScratchRegister);
   1669     } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
   1670       if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
   1671         Label done;
   1672         j(no_overflow, &done, Label::kNear);
   1673         subp(dst, kScratchRegister);
   1674         jmp(bailout_label, near_jump);
   1675         bind(&done);
   1676       } else {
   1677         // Bailout if overflow without reserving src.
    1678         // Bail out on overflow without preserving src.
   1679       }
   1680     } else {
   1681       UNREACHABLE();
   1682     }
   1683   } else {
   1684     DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
   1685     DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
   1686     LoadSmiConstant(dst, constant);
   1687     addp(dst, src);
   1688     j(overflow, bailout_label, near_jump);
   1689   }
   1690 }
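         // A minimal usage sketch (register choices and the `slow` label are
         // illustrative only): add one to the smi in rcx, keep rcx intact, and
         // bail out if the result does not fit in a smi:
         //
         //   Label slow;
         //   SmiAddConstant(rdx, rcx, Smi::FromInt(1),
         //                  SmiOperationConstraint::kPreserveSourceRegister |
         //                      SmiOperationConstraint::kBailoutOnOverflow,
         //                  &slow, Label::kFar);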
   1691 
   1692 
   1693 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   1694   if (constant->value() == 0) {
   1695     if (!dst.is(src)) {
   1696       movp(dst, src);
   1697     }
   1698   } else if (dst.is(src)) {
   1699     DCHECK(!dst.is(kScratchRegister));
   1700     Register constant_reg = GetSmiConstant(constant);
   1701     subp(dst, constant_reg);
   1702   } else {
   1703     if (constant->value() == Smi::kMinValue) {
   1704       LoadSmiConstant(dst, constant);
    1705       // Adding and subtracting the min-value give the same result; they
    1706       // differ only in the overflow flag, which we don't check here.
   1707       addp(dst, src);
   1708     } else {
   1709       // Subtract by adding the negation.
   1710       LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
   1711       addp(dst, src);
   1712     }
   1713   }
   1714 }
   1715 
   1716 
   1717 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
   1718                                     SmiOperationConstraints constraints,
   1719                                     Label* bailout_label,
   1720                                     Label::Distance near_jump) {
   1721   if (constant->value() == 0) {
   1722     if (!dst.is(src)) {
   1723       movp(dst, src);
   1724     }
   1725   } else if (dst.is(src)) {
   1726     DCHECK(!dst.is(kScratchRegister));
   1727     LoadSmiConstant(kScratchRegister, constant);
   1728     subp(dst, kScratchRegister);
   1729     if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
   1730       j(no_overflow, bailout_label, near_jump);
   1731       DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
   1732       addp(dst, kScratchRegister);
   1733     } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
   1734       if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
   1735         Label done;
   1736         j(no_overflow, &done, Label::kNear);
   1737         addp(dst, kScratchRegister);
   1738         jmp(bailout_label, near_jump);
   1739         bind(&done);
   1740       } else {
   1741         // Bailout if overflow without reserving src.
    1742         // Bail out on overflow without preserving src.
   1743       }
   1744     } else {
   1745       UNREACHABLE();
   1746     }
   1747   } else {
   1748     DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
   1749     DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
   1750     if (constant->value() == Smi::kMinValue) {
   1751       DCHECK(!dst.is(kScratchRegister));
   1752       movp(dst, src);
   1753       LoadSmiConstant(kScratchRegister, constant);
   1754       subp(dst, kScratchRegister);
   1755       j(overflow, bailout_label, near_jump);
   1756     } else {
   1757       // Subtract by adding the negation.
   1758       LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
   1759       addp(dst, src);
   1760       j(overflow, bailout_label, near_jump);
   1761     }
   1762   }
   1763 }
   1764 
   1765 
   1766 void MacroAssembler::SmiNeg(Register dst,
   1767                             Register src,
   1768                             Label* on_smi_result,
   1769                             Label::Distance near_jump) {
   1770   if (dst.is(src)) {
   1771     DCHECK(!dst.is(kScratchRegister));
   1772     movp(kScratchRegister, src);
    1773     negp(dst);  // The low tag/padding bits stay zero under negation.
   1774     // Test if result is zero or Smi::kMinValue.
   1775     cmpp(dst, kScratchRegister);
   1776     j(not_equal, on_smi_result, near_jump);
   1777     movp(src, kScratchRegister);
   1778   } else {
   1779     movp(dst, src);
   1780     negp(dst);
   1781     cmpp(dst, src);
   1782     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
   1783     j(not_equal, on_smi_result, near_jump);
   1784   }
   1785 }
   1786 
   1787 
   1788 template<class T>
   1789 static void SmiAddHelper(MacroAssembler* masm,
   1790                          Register dst,
   1791                          Register src1,
   1792                          T src2,
   1793                          Label* on_not_smi_result,
   1794                          Label::Distance near_jump) {
   1795   if (dst.is(src1)) {
   1796     Label done;
   1797     masm->addp(dst, src2);
   1798     masm->j(no_overflow, &done, Label::kNear);
   1799     // Restore src1.
   1800     masm->subp(dst, src2);
   1801     masm->jmp(on_not_smi_result, near_jump);
   1802     masm->bind(&done);
   1803   } else {
   1804     masm->movp(dst, src1);
   1805     masm->addp(dst, src2);
   1806     masm->j(overflow, on_not_smi_result, near_jump);
   1807   }
   1808 }
   1809 
   1810 
   1811 void MacroAssembler::SmiAdd(Register dst,
   1812                             Register src1,
   1813                             Register src2,
   1814                             Label* on_not_smi_result,
   1815                             Label::Distance near_jump) {
   1816   DCHECK_NOT_NULL(on_not_smi_result);
   1817   DCHECK(!dst.is(src2));
   1818   SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
   1819 }
   1820 
   1821 
   1822 void MacroAssembler::SmiAdd(Register dst,
   1823                             Register src1,
   1824                             const Operand& src2,
   1825                             Label* on_not_smi_result,
   1826                             Label::Distance near_jump) {
   1827   DCHECK_NOT_NULL(on_not_smi_result);
   1828   DCHECK(!src2.AddressUsesRegister(dst));
   1829   SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
   1830 }
   1831 
   1832 
   1833 void MacroAssembler::SmiAdd(Register dst,
   1834                             Register src1,
   1835                             Register src2) {
   1836   // No overflow checking. Use only when it's known that
   1837   // overflowing is impossible.
   1838   if (!dst.is(src1)) {
   1839     if (emit_debug_code()) {
   1840       movp(kScratchRegister, src1);
   1841       addp(kScratchRegister, src2);
   1842       Check(no_overflow, kSmiAdditionOverflow);
   1843     }
   1844     leap(dst, Operand(src1, src2, times_1, 0));
   1845   } else {
   1846     addp(dst, src2);
   1847     Assert(no_overflow, kSmiAdditionOverflow);
   1848   }
   1849 }
   1850 
   1851 
   1852 template<class T>
   1853 static void SmiSubHelper(MacroAssembler* masm,
   1854                          Register dst,
   1855                          Register src1,
   1856                          T src2,
   1857                          Label* on_not_smi_result,
   1858                          Label::Distance near_jump) {
   1859   if (dst.is(src1)) {
   1860     Label done;
   1861     masm->subp(dst, src2);
   1862     masm->j(no_overflow, &done, Label::kNear);
   1863     // Restore src1.
   1864     masm->addp(dst, src2);
   1865     masm->jmp(on_not_smi_result, near_jump);
   1866     masm->bind(&done);
   1867   } else {
   1868     masm->movp(dst, src1);
   1869     masm->subp(dst, src2);
   1870     masm->j(overflow, on_not_smi_result, near_jump);
   1871   }
   1872 }
   1873 
   1874 
   1875 void MacroAssembler::SmiSub(Register dst,
   1876                             Register src1,
   1877                             Register src2,
   1878                             Label* on_not_smi_result,
   1879                             Label::Distance near_jump) {
   1880   DCHECK_NOT_NULL(on_not_smi_result);
   1881   DCHECK(!dst.is(src2));
   1882   SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
   1883 }
   1884 
   1885 
   1886 void MacroAssembler::SmiSub(Register dst,
   1887                             Register src1,
   1888                             const Operand& src2,
   1889                             Label* on_not_smi_result,
   1890                             Label::Distance near_jump) {
   1891   DCHECK_NOT_NULL(on_not_smi_result);
   1892   DCHECK(!src2.AddressUsesRegister(dst));
   1893   SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
   1894 }
   1895 
   1896 
   1897 template<class T>
   1898 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
   1899                                    Register dst,
   1900                                    Register src1,
   1901                                    T src2) {
   1902   // No overflow checking. Use only when it's known that
   1903   // overflowing is impossible (e.g., subtracting two positive smis).
   1904   if (!dst.is(src1)) {
   1905     masm->movp(dst, src1);
   1906   }
   1907   masm->subp(dst, src2);
   1908   masm->Assert(no_overflow, kSmiSubtractionOverflow);
   1909 }
   1910 
   1911 
   1912 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
   1913   DCHECK(!dst.is(src2));
   1914   SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
   1915 }
   1916 
   1917 
   1918 void MacroAssembler::SmiSub(Register dst,
   1919                             Register src1,
   1920                             const Operand& src2) {
   1921   SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
   1922 }
   1923 
   1924 
   1925 void MacroAssembler::SmiMul(Register dst,
   1926                             Register src1,
   1927                             Register src2,
   1928                             Label* on_not_smi_result,
   1929                             Label::Distance near_jump) {
   1930   DCHECK(!dst.is(src2));
   1931   DCHECK(!dst.is(kScratchRegister));
   1932   DCHECK(!src1.is(kScratchRegister));
   1933   DCHECK(!src2.is(kScratchRegister));
   1934 
   1935   if (dst.is(src1)) {
   1936     Label failure, zero_correct_result;
   1937     movp(kScratchRegister, src1);  // Create backup for later testing.
   1938     SmiToInteger64(dst, src1);
   1939     imulp(dst, src2);
   1940     j(overflow, &failure, Label::kNear);
   1941 
   1942     // Check for negative zero result.  If product is zero, and one
   1943     // argument is negative, go to slow case.
   1944     Label correct_result;
   1945     testp(dst, dst);
   1946     j(not_zero, &correct_result, Label::kNear);
   1947 
   1948     movp(dst, kScratchRegister);
   1949     xorp(dst, src2);
   1950     // Result was positive zero.
   1951     j(positive, &zero_correct_result, Label::kNear);
   1952 
   1953     bind(&failure);  // Reused failure exit, restores src1.
   1954     movp(src1, kScratchRegister);
   1955     jmp(on_not_smi_result, near_jump);
   1956 
   1957     bind(&zero_correct_result);
   1958     Set(dst, 0);
   1959 
   1960     bind(&correct_result);
   1961   } else {
   1962     SmiToInteger64(dst, src1);
   1963     imulp(dst, src2);
   1964     j(overflow, on_not_smi_result, near_jump);
   1965     // Check for negative zero result.  If product is zero, and one
   1966     // argument is negative, go to slow case.
   1967     Label correct_result;
   1968     testp(dst, dst);
   1969     j(not_zero, &correct_result, Label::kNear);
    1970     // One of src1 and src2 is zero; check whether the other is
    1971     // negative.
   1972     movp(kScratchRegister, src1);
   1973     xorp(kScratchRegister, src2);
   1974     j(negative, on_not_smi_result, near_jump);
   1975     bind(&correct_result);
   1976   }
   1977 }
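         // Why the extra work for a zero product above: 0 is only a correct smi
         // result when the mathematical result is +0.  If exactly one operand is
         // negative (the xor of the operands has its sign bit set), the
         // JavaScript result would be -0, which a smi cannot represent, so the
         // code falls through to the slow case instead.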
   1978 
   1979 
   1980 void MacroAssembler::SmiDiv(Register dst,
   1981                             Register src1,
   1982                             Register src2,
   1983                             Label* on_not_smi_result,
   1984                             Label::Distance near_jump) {
   1985   DCHECK(!src1.is(kScratchRegister));
   1986   DCHECK(!src2.is(kScratchRegister));
   1987   DCHECK(!dst.is(kScratchRegister));
   1988   DCHECK(!src2.is(rax));
   1989   DCHECK(!src2.is(rdx));
   1990   DCHECK(!src1.is(rdx));
   1991 
   1992   // Check for 0 divisor (result is +/-Infinity).
   1993   testp(src2, src2);
   1994   j(zero, on_not_smi_result, near_jump);
   1995 
   1996   if (src1.is(rax)) {
   1997     movp(kScratchRegister, src1);
   1998   }
   1999   SmiToInteger32(rax, src1);
   2000   // We need to rule out dividing Smi::kMinValue by -1, since that would
   2001   // overflow in idiv and raise an exception.
    2002   // We combine this with the negative-zero test (negative zero only happens
   2003   // when dividing zero by a negative number).
   2004 
    2005   // We overshoot a little and go to the slow case if we divide min-value
   2006   // by any negative value, not just -1.
   2007   Label safe_div;
   2008   testl(rax, Immediate(~Smi::kMinValue));
   2009   j(not_zero, &safe_div, Label::kNear);
   2010   testp(src2, src2);
   2011   if (src1.is(rax)) {
   2012     j(positive, &safe_div, Label::kNear);
   2013     movp(src1, kScratchRegister);
   2014     jmp(on_not_smi_result, near_jump);
   2015   } else {
   2016     j(negative, on_not_smi_result, near_jump);
   2017   }
   2018   bind(&safe_div);
   2019 
   2020   SmiToInteger32(src2, src2);
   2021   // Sign extend src1 into edx:eax.
   2022   cdq();
   2023   idivl(src2);
   2024   Integer32ToSmi(src2, src2);
   2025   // Check that the remainder is zero.
   2026   testl(rdx, rdx);
   2027   if (src1.is(rax)) {
   2028     Label smi_result;
   2029     j(zero, &smi_result, Label::kNear);
   2030     movp(src1, kScratchRegister);
   2031     jmp(on_not_smi_result, near_jump);
   2032     bind(&smi_result);
   2033   } else {
   2034     j(not_zero, on_not_smi_result, near_jump);
   2035   }
   2036   if (!dst.is(src1) && src1.is(rax)) {
   2037     movp(src1, kScratchRegister);
   2038   }
   2039   Integer32ToSmi(dst, rax);
   2040 }
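         // SmiDiv only succeeds for an exact quotient: smi division that leaves
         // a remainder produces a non-integer in JavaScript, so a non-zero rdx
         // after idivl also routes to the slow case.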
   2041 
   2042 
   2043 void MacroAssembler::SmiMod(Register dst,
   2044                             Register src1,
   2045                             Register src2,
   2046                             Label* on_not_smi_result,
   2047                             Label::Distance near_jump) {
   2048   DCHECK(!dst.is(kScratchRegister));
   2049   DCHECK(!src1.is(kScratchRegister));
   2050   DCHECK(!src2.is(kScratchRegister));
   2051   DCHECK(!src2.is(rax));
   2052   DCHECK(!src2.is(rdx));
   2053   DCHECK(!src1.is(rdx));
   2054   DCHECK(!src1.is(src2));
   2055 
   2056   testp(src2, src2);
   2057   j(zero, on_not_smi_result, near_jump);
   2058 
   2059   if (src1.is(rax)) {
   2060     movp(kScratchRegister, src1);
   2061   }
   2062   SmiToInteger32(rax, src1);
   2063   SmiToInteger32(src2, src2);
   2064 
   2065   // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
   2066   Label safe_div;
   2067   cmpl(rax, Immediate(Smi::kMinValue));
   2068   j(not_equal, &safe_div, Label::kNear);
   2069   cmpl(src2, Immediate(-1));
   2070   j(not_equal, &safe_div, Label::kNear);
    2071   // Retag inputs and go to the slow case.
   2072   Integer32ToSmi(src2, src2);
   2073   if (src1.is(rax)) {
   2074     movp(src1, kScratchRegister);
   2075   }
   2076   jmp(on_not_smi_result, near_jump);
   2077   bind(&safe_div);
   2078 
   2079   // Sign extend eax into edx:eax.
   2080   cdq();
   2081   idivl(src2);
   2082   // Restore smi tags on inputs.
   2083   Integer32ToSmi(src2, src2);
   2084   if (src1.is(rax)) {
   2085     movp(src1, kScratchRegister);
   2086   }
   2087   // Check for a negative zero result.  If the result is zero, and the
   2088   // dividend is negative, go slow to return a floating point negative zero.
   2089   Label smi_result;
   2090   testl(rdx, rdx);
   2091   j(not_zero, &smi_result, Label::kNear);
   2092   testp(src1, src1);
   2093   j(negative, on_not_smi_result, near_jump);
   2094   bind(&smi_result);
   2095   Integer32ToSmi(dst, rdx);
   2096 }
   2097 
   2098 
   2099 void MacroAssembler::SmiNot(Register dst, Register src) {
   2100   DCHECK(!dst.is(kScratchRegister));
   2101   DCHECK(!src.is(kScratchRegister));
   2102   if (SmiValuesAre32Bits()) {
   2103     // Set tag and padding bits before negating, so that they are zero
   2104     // afterwards.
   2105     movl(kScratchRegister, Immediate(~0));
   2106   } else {
   2107     DCHECK(SmiValuesAre31Bits());
   2108     movl(kScratchRegister, Immediate(1));
   2109   }
   2110   if (dst.is(src)) {
   2111     xorp(dst, kScratchRegister);
   2112   } else {
   2113     leap(dst, Operand(src, kScratchRegister, times_1, 0));
   2114   }
   2115   notp(dst);
   2116 }
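         // The trick above, spelled out: a tagged smi has its low tag/padding
         // bits clear, so adding (or xor-ing in) a mask of ones over exactly
         // those bits and then applying notp flips only the payload bits and
         // leaves the tag bits clear again.  With 32-bit smi values,
         // ~((v << 32) | 0xffffffff) == (~v) << 32, which is the smi encoding
         // of ~v.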
   2117 
   2118 
   2119 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
   2120   DCHECK(!dst.is(src2));
   2121   if (!dst.is(src1)) {
   2122     movp(dst, src1);
   2123   }
   2124   andp(dst, src2);
   2125 }
   2126 
   2127 
   2128 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   2129   if (constant->value() == 0) {
   2130     Set(dst, 0);
   2131   } else if (dst.is(src)) {
   2132     DCHECK(!dst.is(kScratchRegister));
   2133     Register constant_reg = GetSmiConstant(constant);
   2134     andp(dst, constant_reg);
   2135   } else {
   2136     LoadSmiConstant(dst, constant);
   2137     andp(dst, src);
   2138   }
   2139 }
   2140 
   2141 
   2142 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   2143   if (!dst.is(src1)) {
   2144     DCHECK(!src1.is(src2));
   2145     movp(dst, src1);
   2146   }
   2147   orp(dst, src2);
   2148 }
   2149 
   2150 
   2151 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   2152   if (dst.is(src)) {
   2153     DCHECK(!dst.is(kScratchRegister));
   2154     Register constant_reg = GetSmiConstant(constant);
   2155     orp(dst, constant_reg);
   2156   } else {
   2157     LoadSmiConstant(dst, constant);
   2158     orp(dst, src);
   2159   }
   2160 }
   2161 
   2162 
   2163 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   2164   if (!dst.is(src1)) {
   2165     DCHECK(!src1.is(src2));
   2166     movp(dst, src1);
   2167   }
   2168   xorp(dst, src2);
   2169 }
   2170 
   2171 
   2172 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   2173   if (dst.is(src)) {
   2174     DCHECK(!dst.is(kScratchRegister));
   2175     Register constant_reg = GetSmiConstant(constant);
   2176     xorp(dst, constant_reg);
   2177   } else {
   2178     LoadSmiConstant(dst, constant);
   2179     xorp(dst, src);
   2180   }
   2181 }
   2182 
   2183 
   2184 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
   2185                                                      Register src,
   2186                                                      int shift_value) {
   2187   DCHECK(is_uint5(shift_value));
   2188   if (shift_value > 0) {
   2189     if (dst.is(src)) {
   2190       sarp(dst, Immediate(shift_value + kSmiShift));
   2191       shlp(dst, Immediate(kSmiShift));
   2192     } else {
   2193       UNIMPLEMENTED();  // Not used.
   2194     }
   2195   }
   2196 }
   2197 
   2198 
   2199 void MacroAssembler::SmiShiftLeftConstant(Register dst,
   2200                                           Register src,
   2201                                           int shift_value,
   2202                                           Label* on_not_smi_result,
   2203                                           Label::Distance near_jump) {
   2204   if (SmiValuesAre32Bits()) {
   2205     if (!dst.is(src)) {
   2206       movp(dst, src);
   2207     }
   2208     if (shift_value > 0) {
    2209       // The shift amount is masked to the low 5 bits, not six as shlq would use.
   2210       shlq(dst, Immediate(shift_value & 0x1f));
   2211     }
   2212   } else {
   2213     DCHECK(SmiValuesAre31Bits());
   2214     if (dst.is(src)) {
   2215       UNIMPLEMENTED();  // Not used.
   2216     } else {
   2217       SmiToInteger32(dst, src);
   2218       shll(dst, Immediate(shift_value));
   2219       JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
   2220       Integer32ToSmi(dst, dst);
   2221     }
   2222   }
   2223 }
   2224 
   2225 
   2226 void MacroAssembler::SmiShiftLogicalRightConstant(
   2227     Register dst, Register src, int shift_value,
   2228     Label* on_not_smi_result, Label::Distance near_jump) {
    2229   // Logical right shift interprets its result as an *unsigned* number.
   2230   if (dst.is(src)) {
   2231     UNIMPLEMENTED();  // Not used.
   2232   } else {
   2233     if (shift_value == 0) {
   2234       testp(src, src);
   2235       j(negative, on_not_smi_result, near_jump);
   2236     }
   2237     if (SmiValuesAre32Bits()) {
   2238       movp(dst, src);
   2239       shrp(dst, Immediate(shift_value + kSmiShift));
   2240       shlp(dst, Immediate(kSmiShift));
   2241     } else {
   2242       DCHECK(SmiValuesAre31Bits());
   2243       SmiToInteger32(dst, src);
   2244       shrp(dst, Immediate(shift_value));
   2245       JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
   2246       Integer32ToSmi(dst, dst);
   2247     }
   2248   }
   2249 }
   2250 
   2251 
   2252 void MacroAssembler::SmiShiftLeft(Register dst,
   2253                                   Register src1,
   2254                                   Register src2,
   2255                                   Label* on_not_smi_result,
   2256                                   Label::Distance near_jump) {
   2257   if (SmiValuesAre32Bits()) {
   2258     DCHECK(!dst.is(rcx));
   2259     if (!dst.is(src1)) {
   2260       movp(dst, src1);
   2261     }
   2262     // Untag shift amount.
   2263     SmiToInteger32(rcx, src2);
    2264     // The shift amount is masked to the low 5 bits, not six as shlq would use.
   2265     andp(rcx, Immediate(0x1f));
   2266     shlq_cl(dst);
   2267   } else {
   2268     DCHECK(SmiValuesAre31Bits());
   2269     DCHECK(!dst.is(kScratchRegister));
   2270     DCHECK(!src1.is(kScratchRegister));
   2271     DCHECK(!src2.is(kScratchRegister));
   2272     DCHECK(!dst.is(src2));
   2273     DCHECK(!dst.is(rcx));
   2274 
   2275     if (src1.is(rcx) || src2.is(rcx)) {
   2276       movq(kScratchRegister, rcx);
   2277     }
   2278     if (dst.is(src1)) {
   2279       UNIMPLEMENTED();  // Not used.
   2280     } else {
   2281       Label valid_result;
   2282       SmiToInteger32(dst, src1);
   2283       SmiToInteger32(rcx, src2);
   2284       shll_cl(dst);
   2285       JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
    2286       // Since neither src1 nor src2 can be dst, we do not need to restore
    2287       // them after clobbering dst.
   2288       if (src1.is(rcx) || src2.is(rcx)) {
   2289         if (src1.is(rcx)) {
   2290           movq(src1, kScratchRegister);
   2291         } else {
   2292           movq(src2, kScratchRegister);
   2293         }
   2294       }
   2295       jmp(on_not_smi_result, near_jump);
   2296       bind(&valid_result);
   2297       Integer32ToSmi(dst, dst);
   2298     }
   2299   }
   2300 }
   2301 
   2302 
   2303 void MacroAssembler::SmiShiftLogicalRight(Register dst,
   2304                                           Register src1,
   2305                                           Register src2,
   2306                                           Label* on_not_smi_result,
   2307                                           Label::Distance near_jump) {
   2308   DCHECK(!dst.is(kScratchRegister));
   2309   DCHECK(!src1.is(kScratchRegister));
   2310   DCHECK(!src2.is(kScratchRegister));
   2311   DCHECK(!dst.is(src2));
   2312   DCHECK(!dst.is(rcx));
   2313   if (src1.is(rcx) || src2.is(rcx)) {
   2314     movq(kScratchRegister, rcx);
   2315   }
   2316   if (dst.is(src1)) {
   2317     UNIMPLEMENTED();  // Not used.
   2318   } else {
   2319     Label valid_result;
   2320     SmiToInteger32(dst, src1);
   2321     SmiToInteger32(rcx, src2);
   2322     shrl_cl(dst);
   2323     JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    2324     // Since neither src1 nor src2 can be dst, we do not need to restore
    2325     // them after clobbering dst.
   2326     if (src1.is(rcx) || src2.is(rcx)) {
   2327       if (src1.is(rcx)) {
   2328         movq(src1, kScratchRegister);
   2329       } else {
   2330         movq(src2, kScratchRegister);
   2331       }
    2332     }
   2333     jmp(on_not_smi_result, near_jump);
   2334     bind(&valid_result);
   2335     Integer32ToSmi(dst, dst);
   2336   }
   2337 }
   2338 
   2339 
   2340 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
   2341                                              Register src1,
   2342                                              Register src2) {
   2343   DCHECK(!dst.is(kScratchRegister));
   2344   DCHECK(!src1.is(kScratchRegister));
   2345   DCHECK(!src2.is(kScratchRegister));
   2346   DCHECK(!dst.is(rcx));
   2347 
   2348   SmiToInteger32(rcx, src2);
   2349   if (!dst.is(src1)) {
   2350     movp(dst, src1);
   2351   }
   2352   SmiToInteger32(dst, dst);
   2353   sarl_cl(dst);
   2354   Integer32ToSmi(dst, dst);
   2355 }
   2356 
   2357 
   2358 void MacroAssembler::SelectNonSmi(Register dst,
   2359                                   Register src1,
   2360                                   Register src2,
   2361                                   Label* on_not_smis,
   2362                                   Label::Distance near_jump) {
   2363   DCHECK(!dst.is(kScratchRegister));
   2364   DCHECK(!src1.is(kScratchRegister));
   2365   DCHECK(!src2.is(kScratchRegister));
   2366   DCHECK(!dst.is(src1));
   2367   DCHECK(!dst.is(src2));
    2368   // The operands must not both be smis.
   2369 #ifdef DEBUG
   2370   Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
   2371   Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
   2372 #endif
   2373   STATIC_ASSERT(kSmiTag == 0);
   2374   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
   2375   movl(kScratchRegister, Immediate(kSmiTagMask));
   2376   andp(kScratchRegister, src1);
   2377   testl(kScratchRegister, src2);
    2378   // If non-zero then both are non-smis.
   2379   j(not_zero, on_not_smis, near_jump);
   2380 
   2381   // Exactly one operand is a smi.
    2382   // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
   2383   // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
   2384   subp(kScratchRegister, Immediate(1));
    2385   // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
   2386   movp(dst, src1);
   2387   xorp(dst, src2);
   2388   andp(dst, kScratchRegister);
   2389   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   2390   xorp(dst, src1);
   2391   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
   2392 }
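         // The branch-free select above, with the algebra made explicit: after
         // the subp, kScratchRegister is all ones when src1 is the smi and all
         // zeros when src2 is.  dst = ((src1 ^ src2) & mask) ^ src1 therefore
         // evaluates to src2 in the first case and to src1 in the second, i.e.
         // always to the operand that is not a smi.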
   2393 
   2394 
   2395 SmiIndex MacroAssembler::SmiToIndex(Register dst,
   2396                                     Register src,
   2397                                     int shift) {
   2398   if (SmiValuesAre32Bits()) {
   2399     DCHECK(is_uint6(shift));
   2400     // There is a possible optimization if shift is in the range 60-63, but that
   2401     // will (and must) never happen.
   2402     if (!dst.is(src)) {
   2403       movp(dst, src);
   2404     }
   2405     if (shift < kSmiShift) {
   2406       sarp(dst, Immediate(kSmiShift - shift));
   2407     } else {
   2408       shlp(dst, Immediate(shift - kSmiShift));
   2409     }
   2410     return SmiIndex(dst, times_1);
   2411   } else {
   2412     DCHECK(SmiValuesAre31Bits());
   2413     DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
   2414     if (!dst.is(src)) {
   2415       movp(dst, src);
   2416     }
    2417     // We have to sign-extend the index register to 64 bits as the smi might
    2418     // be negative.
   2419     movsxlq(dst, dst);
   2420     if (shift == times_1) {
   2421       sarq(dst, Immediate(kSmiShift));
   2422       return SmiIndex(dst, times_1);
   2423     }
   2424     return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
   2425   }
   2426 }
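         // SmiToIndex turns a tagged smi into a (register, scale) pair for use
         // in memory operands.  With 32-bit payloads the payload is shifted into
         // place (the net shift can go either way, hence the sarp/shlp split).
         // With 31-bit payloads the tagged value is already the payload times
         // two, so a requested shift of times_2 or more is folded into the
         // ScaleFactor instead of shifting the register.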
   2427 
   2428 
   2429 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   2430                                             Register src,
   2431                                             int shift) {
   2432   if (SmiValuesAre32Bits()) {
   2433     // Register src holds a positive smi.
   2434     DCHECK(is_uint6(shift));
   2435     if (!dst.is(src)) {
   2436       movp(dst, src);
   2437     }
   2438     negp(dst);
   2439     if (shift < kSmiShift) {
   2440       sarp(dst, Immediate(kSmiShift - shift));
   2441     } else {
   2442       shlp(dst, Immediate(shift - kSmiShift));
   2443     }
   2444     return SmiIndex(dst, times_1);
   2445   } else {
   2446     DCHECK(SmiValuesAre31Bits());
   2447     DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
   2448     if (!dst.is(src)) {
   2449       movp(dst, src);
   2450     }
   2451     negq(dst);
   2452     if (shift == times_1) {
   2453       sarq(dst, Immediate(kSmiShift));
   2454       return SmiIndex(dst, times_1);
   2455     }
   2456     return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
   2457   }
   2458 }
   2459 
   2460 
   2461 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
   2462   if (SmiValuesAre32Bits()) {
   2463     DCHECK_EQ(0, kSmiShift % kBitsPerByte);
   2464     addl(dst, Operand(src, kSmiShift / kBitsPerByte));
   2465   } else {
   2466     DCHECK(SmiValuesAre31Bits());
   2467     SmiToInteger32(kScratchRegister, src);
   2468     addl(dst, kScratchRegister);
   2469   }
   2470 }
   2471 
   2472 
   2473 void MacroAssembler::Push(Smi* source) {
   2474   intptr_t smi = reinterpret_cast<intptr_t>(source);
   2475   if (is_int32(smi)) {
   2476     Push(Immediate(static_cast<int32_t>(smi)));
   2477   } else {
   2478     Register constant = GetSmiConstant(source);
   2479     Push(constant);
   2480   }
   2481 }
   2482 
   2483 
   2484 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
   2485   DCHECK(!src.is(scratch));
   2486   movp(scratch, src);
   2487   // High bits.
   2488   shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
   2489   shlp(src, Immediate(kSmiShift));
   2490   Push(src);
   2491   // Low bits.
   2492   shlp(scratch, Immediate(kSmiShift));
   2493   Push(scratch);
   2494 }
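         // PushRegisterAsTwoSmis and the matching Pop below split a raw 64-bit
         // word into two smi-tagged pieces (the top kSmiShift bits and the
         // rest), so the value can live on the stack without ever looking like
         // a heap pointer to the GC.  Note that the shifts clobber src.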
   2495 
   2496 
   2497 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
   2498   DCHECK(!dst.is(scratch));
   2499   Pop(scratch);
   2500   // Low bits.
   2501   shrp(scratch, Immediate(kSmiShift));
   2502   Pop(dst);
   2503   shrp(dst, Immediate(kSmiShift));
   2504   // High bits.
   2505   shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
   2506   orp(dst, scratch);
   2507 }
   2508 
   2509 
   2510 void MacroAssembler::Test(const Operand& src, Smi* source) {
   2511   if (SmiValuesAre32Bits()) {
   2512     testl(Operand(src, kIntSize), Immediate(source->value()));
   2513   } else {
   2514     DCHECK(SmiValuesAre31Bits());
   2515     testl(src, Immediate(source));
   2516   }
   2517 }
   2518 
   2519 
   2520 // ----------------------------------------------------------------------------
   2521 
   2522 
   2523 void MacroAssembler::JumpIfNotString(Register object,
   2524                                      Register object_map,
   2525                                      Label* not_string,
   2526                                      Label::Distance near_jump) {
   2527   Condition is_smi = CheckSmi(object);
   2528   j(is_smi, not_string, near_jump);
   2529   CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
   2530   j(above_equal, not_string, near_jump);
   2531 }
   2532 
   2533 
   2534 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
   2535     Register first_object, Register second_object, Register scratch1,
   2536     Register scratch2, Label* on_fail, Label::Distance near_jump) {
   2537   // Check that both objects are not smis.
   2538   Condition either_smi = CheckEitherSmi(first_object, second_object);
   2539   j(either_smi, on_fail, near_jump);
   2540 
   2541   // Load instance type for both strings.
   2542   movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
   2543   movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
   2544   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   2545   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
   2546 
   2547   // Check that both are flat one-byte strings.
   2548   DCHECK(kNotStringTag != 0);
   2549   const int kFlatOneByteStringMask =
   2550       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2551   const int kFlatOneByteStringTag =
   2552       kStringTag | kOneByteStringTag | kSeqStringTag;
   2553 
   2554   andl(scratch1, Immediate(kFlatOneByteStringMask));
   2555   andl(scratch2, Immediate(kFlatOneByteStringMask));
   2556   // Interleave the bits to check both scratch1 and scratch2 in one test.
   2557   DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
   2558   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
   2559   cmpl(scratch1,
   2560        Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
   2561   j(not_equal, on_fail, near_jump);
   2562 }
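         // How one cmpl covers both strings above: the DCHECK guarantees that
         // the mask and the mask shifted left by 3 share no bits, so the
         // leap-based addition scratch1 + scratch2 * 8 cannot carry and simply
         // combines the two masked instance types; it equals
         // kFlatOneByteStringTag + (kFlatOneByteStringTag << 3) exactly when
         // both strings are flat one-byte strings.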
   2563 
   2564 
   2565 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
   2566     Register instance_type, Register scratch, Label* failure,
   2567     Label::Distance near_jump) {
   2568   if (!scratch.is(instance_type)) {
   2569     movl(scratch, instance_type);
   2570   }
   2571 
   2572   const int kFlatOneByteStringMask =
   2573       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2574 
   2575   andl(scratch, Immediate(kFlatOneByteStringMask));
   2576   cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
   2577   j(not_equal, failure, near_jump);
   2578 }
   2579 
   2580 
   2581 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
   2582     Register first_object_instance_type, Register second_object_instance_type,
   2583     Register scratch1, Register scratch2, Label* on_fail,
   2584     Label::Distance near_jump) {
   2585   // Load instance type for both strings.
   2586   movp(scratch1, first_object_instance_type);
   2587   movp(scratch2, second_object_instance_type);
   2588 
   2589   // Check that both are flat one-byte strings.
   2590   DCHECK(kNotStringTag != 0);
   2591   const int kFlatOneByteStringMask =
   2592       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   2593   const int kFlatOneByteStringTag =
   2594       kStringTag | kOneByteStringTag | kSeqStringTag;
   2595 
   2596   andl(scratch1, Immediate(kFlatOneByteStringMask));
   2597   andl(scratch2, Immediate(kFlatOneByteStringMask));
   2598   // Interleave the bits to check both scratch1 and scratch2 in one test.
   2599   DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
   2600   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
   2601   cmpl(scratch1,
   2602        Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
   2603   j(not_equal, on_fail, near_jump);
   2604 }
   2605 
   2606 
   2607 template<class T>
   2608 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
   2609                                       T operand_or_register,
   2610                                       Label* not_unique_name,
   2611                                       Label::Distance distance) {
   2612   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   2613   Label succeed;
   2614   masm->testb(operand_or_register,
   2615               Immediate(kIsNotStringMask | kIsNotInternalizedMask));
   2616   masm->j(zero, &succeed, Label::kNear);
   2617   masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
   2618   masm->j(not_equal, not_unique_name, distance);
   2619 
   2620   masm->bind(&succeed);
   2621 }
   2622 
   2623 
   2624 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
   2625                                                      Label* not_unique_name,
   2626                                                      Label::Distance distance) {
   2627   JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
   2628 }
   2629 
   2630 
   2631 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
   2632                                                      Label* not_unique_name,
   2633                                                      Label::Distance distance) {
   2634   JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
   2635 }
   2636 
   2637 
   2638 void MacroAssembler::Move(Register dst, Register src) {
   2639   if (!dst.is(src)) {
   2640     movp(dst, src);
   2641   }
   2642 }
   2643 
   2644 
   2645 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   2646   AllowDeferredHandleDereference smi_check;
   2647   if (source->IsSmi()) {
   2648     Move(dst, Smi::cast(*source));
   2649   } else {
   2650     MoveHeapObject(dst, source);
   2651   }
   2652 }
   2653 
   2654 
   2655 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
   2656   AllowDeferredHandleDereference smi_check;
   2657   if (source->IsSmi()) {
   2658     Move(dst, Smi::cast(*source));
   2659   } else {
   2660     MoveHeapObject(kScratchRegister, source);
   2661     movp(dst, kScratchRegister);
   2662   }
   2663 }
   2664 
   2665 
   2666 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
   2667   if (src == 0) {
   2668     Xorpd(dst, dst);
   2669   } else {
   2670     unsigned pop = base::bits::CountPopulation32(src);
   2671     DCHECK_NE(0u, pop);
   2672     if (pop == 32) {
   2673       Pcmpeqd(dst, dst);
   2674     } else {
   2675       movl(kScratchRegister, Immediate(src));
   2676       Movq(dst, kScratchRegister);
   2677     }
   2678   }
   2679 }
   2680 
   2681 
   2682 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
   2683   if (src == 0) {
   2684     Xorpd(dst, dst);
   2685   } else {
   2686     unsigned nlz = base::bits::CountLeadingZeros64(src);
   2687     unsigned ntz = base::bits::CountTrailingZeros64(src);
   2688     unsigned pop = base::bits::CountPopulation64(src);
   2689     DCHECK_NE(0u, pop);
   2690     if (pop == 64) {
   2691       Pcmpeqd(dst, dst);
   2692     } else if (pop + ntz == 64) {
   2693       Pcmpeqd(dst, dst);
   2694       Psllq(dst, ntz);
   2695     } else if (pop + nlz == 64) {
   2696       Pcmpeqd(dst, dst);
   2697       Psrlq(dst, nlz);
   2698     } else {
   2699       uint32_t lower = static_cast<uint32_t>(src);
   2700       uint32_t upper = static_cast<uint32_t>(src >> 32);
   2701       if (upper == 0) {
   2702         Move(dst, lower);
   2703       } else {
   2704         movq(kScratchRegister, src);
   2705         Movq(dst, kScratchRegister);
   2706       }
   2707     }
   2708   }
   2709 }
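         // The special cases above avoid a full 64-bit immediate load: an
         // all-ones value comes from a single pcmpeqd (which sets every bit),
         // and a value whose ones form one contiguous run touching either end
         // of the word is built by setting all bits and shifting the run into
         // place with psllq/psrlq; e.g. 0xffff000000000000 is all-ones shifted
         // left by its 48 trailing zeros.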
   2710 
   2711 
   2712 void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
   2713   if (CpuFeatures::IsSupported(AVX)) {
   2714     CpuFeatureScope scope(this, AVX);
   2715     vmovaps(dst, src);
   2716   } else {
   2717     movaps(dst, src);
   2718   }
   2719 }
   2720 
   2721 
   2722 void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
   2723   if (CpuFeatures::IsSupported(AVX)) {
   2724     CpuFeatureScope scope(this, AVX);
   2725     vmovapd(dst, src);
   2726   } else {
   2727     movapd(dst, src);
   2728   }
   2729 }
   2730 
   2731 
   2732 void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
   2733   if (CpuFeatures::IsSupported(AVX)) {
   2734     CpuFeatureScope scope(this, AVX);
   2735     vmovsd(dst, dst, src);
   2736   } else {
   2737     movsd(dst, src);
   2738   }
   2739 }
   2740 
   2741 
   2742 void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
   2743   if (CpuFeatures::IsSupported(AVX)) {
   2744     CpuFeatureScope scope(this, AVX);
   2745     vmovsd(dst, src);
   2746   } else {
   2747     movsd(dst, src);
   2748   }
   2749 }
   2750 
   2751 
   2752 void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
   2753   if (CpuFeatures::IsSupported(AVX)) {
   2754     CpuFeatureScope scope(this, AVX);
   2755     vmovsd(dst, src);
   2756   } else {
   2757     movsd(dst, src);
   2758   }
   2759 }
   2760 
   2761 
   2762 void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
   2763   if (CpuFeatures::IsSupported(AVX)) {
   2764     CpuFeatureScope scope(this, AVX);
   2765     vmovss(dst, dst, src);
   2766   } else {
   2767     movss(dst, src);
   2768   }
   2769 }
   2770 
   2771 
   2772 void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
   2773   if (CpuFeatures::IsSupported(AVX)) {
   2774     CpuFeatureScope scope(this, AVX);
   2775     vmovss(dst, src);
   2776   } else {
   2777     movss(dst, src);
   2778   }
   2779 }
   2780 
   2781 
   2782 void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
   2783   if (CpuFeatures::IsSupported(AVX)) {
   2784     CpuFeatureScope scope(this, AVX);
   2785     vmovss(dst, src);
   2786   } else {
   2787     movss(dst, src);
   2788   }
   2789 }
   2790 
   2791 
   2792 void MacroAssembler::Movd(XMMRegister dst, Register src) {
   2793   if (CpuFeatures::IsSupported(AVX)) {
   2794     CpuFeatureScope scope(this, AVX);
   2795     vmovd(dst, src);
   2796   } else {
   2797     movd(dst, src);
   2798   }
   2799 }
   2800 
   2801 
   2802 void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
   2803   if (CpuFeatures::IsSupported(AVX)) {
   2804     CpuFeatureScope scope(this, AVX);
   2805     vmovd(dst, src);
   2806   } else {
   2807     movd(dst, src);
   2808   }
   2809 }
   2810 
   2811 
   2812 void MacroAssembler::Movd(Register dst, XMMRegister src) {
   2813   if (CpuFeatures::IsSupported(AVX)) {
   2814     CpuFeatureScope scope(this, AVX);
   2815     vmovd(dst, src);
   2816   } else {
   2817     movd(dst, src);
   2818   }
   2819 }
   2820 
   2821 
   2822 void MacroAssembler::Movq(XMMRegister dst, Register src) {
   2823   if (CpuFeatures::IsSupported(AVX)) {
   2824     CpuFeatureScope scope(this, AVX);
   2825     vmovq(dst, src);
   2826   } else {
   2827     movq(dst, src);
   2828   }
   2829 }
   2830 
   2831 
   2832 void MacroAssembler::Movq(Register dst, XMMRegister src) {
   2833   if (CpuFeatures::IsSupported(AVX)) {
   2834     CpuFeatureScope scope(this, AVX);
   2835     vmovq(dst, src);
   2836   } else {
   2837     movq(dst, src);
   2838   }
   2839 }
   2840 
   2841 
   2842 void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
   2843   if (CpuFeatures::IsSupported(AVX)) {
   2844     CpuFeatureScope scope(this, AVX);
   2845     vmovmskpd(dst, src);
   2846   } else {
   2847     movmskpd(dst, src);
   2848   }
   2849 }
   2850 
   2851 
   2852 void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
   2853                              RoundingMode mode) {
   2854   if (CpuFeatures::IsSupported(AVX)) {
   2855     CpuFeatureScope scope(this, AVX);
   2856     vroundss(dst, dst, src, mode);
   2857   } else {
   2858     roundss(dst, src, mode);
   2859   }
   2860 }
   2861 
   2862 
   2863 void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
   2864                              RoundingMode mode) {
   2865   if (CpuFeatures::IsSupported(AVX)) {
   2866     CpuFeatureScope scope(this, AVX);
   2867     vroundsd(dst, dst, src, mode);
   2868   } else {
   2869     roundsd(dst, src, mode);
   2870   }
   2871 }
   2872 
   2873 
   2874 void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
   2875   if (CpuFeatures::IsSupported(AVX)) {
   2876     CpuFeatureScope scope(this, AVX);
   2877     vsqrtsd(dst, dst, src);
   2878   } else {
   2879     sqrtsd(dst, src);
   2880   }
   2881 }
   2882 
   2883 
   2884 void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
   2885   if (CpuFeatures::IsSupported(AVX)) {
   2886     CpuFeatureScope scope(this, AVX);
   2887     vsqrtsd(dst, dst, src);
   2888   } else {
   2889     sqrtsd(dst, src);
   2890   }
   2891 }
   2892 
   2893 
   2894 void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
   2895   if (CpuFeatures::IsSupported(AVX)) {
   2896     CpuFeatureScope scope(this, AVX);
   2897     vucomiss(src1, src2);
   2898   } else {
   2899     ucomiss(src1, src2);
   2900   }
   2901 }
   2902 
   2903 
   2904 void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
   2905   if (CpuFeatures::IsSupported(AVX)) {
   2906     CpuFeatureScope scope(this, AVX);
   2907     vucomiss(src1, src2);
   2908   } else {
   2909     ucomiss(src1, src2);
   2910   }
   2911 }
   2912 
   2913 
   2914 void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
   2915   if (CpuFeatures::IsSupported(AVX)) {
   2916     CpuFeatureScope scope(this, AVX);
   2917     vucomisd(src1, src2);
   2918   } else {
   2919     ucomisd(src1, src2);
   2920   }
   2921 }
   2922 
   2923 
   2924 void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
   2925   if (CpuFeatures::IsSupported(AVX)) {
   2926     CpuFeatureScope scope(this, AVX);
   2927     vucomisd(src1, src2);
   2928   } else {
   2929     ucomisd(src1, src2);
   2930   }
   2931 }
   2932 
   2933 
   2934 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
   2935   AllowDeferredHandleDereference smi_check;
   2936   if (source->IsSmi()) {
   2937     Cmp(dst, Smi::cast(*source));
   2938   } else {
   2939     MoveHeapObject(kScratchRegister, source);
   2940     cmpp(dst, kScratchRegister);
   2941   }
   2942 }
   2943 
   2944 
   2945 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   2946   AllowDeferredHandleDereference smi_check;
   2947   if (source->IsSmi()) {
   2948     Cmp(dst, Smi::cast(*source));
   2949   } else {
   2950     MoveHeapObject(kScratchRegister, source);
   2951     cmpp(dst, kScratchRegister);
   2952   }
   2953 }
   2954 
   2955 
   2956 void MacroAssembler::Push(Handle<Object> source) {
   2957   AllowDeferredHandleDereference smi_check;
   2958   if (source->IsSmi()) {
   2959     Push(Smi::cast(*source));
   2960   } else {
   2961     MoveHeapObject(kScratchRegister, source);
   2962     Push(kScratchRegister);
   2963   }
   2964 }
   2965 
   2966 
   2967 void MacroAssembler::MoveHeapObject(Register result,
   2968                                     Handle<Object> object) {
   2969   AllowDeferredHandleDereference using_raw_address;
   2970   DCHECK(object->IsHeapObject());
   2971   if (isolate()->heap()->InNewSpace(*object)) {
   2972     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   2973     Move(result, cell, RelocInfo::CELL);
   2974     movp(result, Operand(result, 0));
   2975   } else {
   2976     Move(result, object, RelocInfo::EMBEDDED_OBJECT);
   2977   }
   2978 }
   2979 
   2980 
   2981 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
   2982   if (dst.is(rax)) {
   2983     AllowDeferredHandleDereference embedding_raw_address;
   2984     load_rax(cell.location(), RelocInfo::CELL);
   2985   } else {
   2986     Move(dst, cell, RelocInfo::CELL);
   2987     movp(dst, Operand(dst, 0));
   2988   }
   2989 }
   2990 
   2991 
   2992 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
   2993                                   Register scratch) {
   2994   Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
   2995   cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
   2996 }
   2997 
   2998 
   2999 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
   3000   Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
   3001   movp(value, FieldOperand(value, WeakCell::kValueOffset));
   3002 }
   3003 
   3004 
   3005 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
   3006                                    Label* miss) {
   3007   GetWeakValue(value, cell);
   3008   JumpIfSmi(value, miss);
   3009 }
   3010 
   3011 
   3012 void MacroAssembler::Drop(int stack_elements) {
   3013   if (stack_elements > 0) {
   3014     addp(rsp, Immediate(stack_elements * kPointerSize));
   3015   }
   3016 }
   3017 
   3018 
   3019 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
   3020                                             Register scratch) {
   3021   DCHECK(stack_elements > 0);
   3022   if (kPointerSize == kInt64Size && stack_elements == 1) {
   3023     popq(MemOperand(rsp, 0));
   3024     return;
   3025   }
   3026 
   3027   PopReturnAddressTo(scratch);
   3028   Drop(stack_elements);
   3029   PushReturnAddressFrom(scratch);
   3030 }
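         // The single-element fast path above relies on popq with a memory
         // destination: the return address is popped and stored at the new top
         // of stack (rsp is incremented before the effective address is
         // computed), which drops exactly one word from under it without using
         // a scratch register.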
   3031 
   3032 
   3033 void MacroAssembler::Push(Register src) {
   3034   if (kPointerSize == kInt64Size) {
   3035     pushq(src);
   3036   } else {
   3037     // x32 uses 64-bit push for rbp in the prologue.
   3038     DCHECK(src.code() != rbp.code());
   3039     leal(rsp, Operand(rsp, -4));
   3040     movp(Operand(rsp, 0), src);
   3041   }
   3042 }
   3043 
   3044 
   3045 void MacroAssembler::Push(const Operand& src) {
   3046   if (kPointerSize == kInt64Size) {
   3047     pushq(src);
   3048   } else {
   3049     movp(kScratchRegister, src);
   3050     leal(rsp, Operand(rsp, -4));
   3051     movp(Operand(rsp, 0), kScratchRegister);
   3052   }
   3053 }
   3054 
   3055 
   3056 void MacroAssembler::PushQuad(const Operand& src) {
   3057   if (kPointerSize == kInt64Size) {
   3058     pushq(src);
   3059   } else {
   3060     movp(kScratchRegister, src);
   3061     pushq(kScratchRegister);
   3062   }
   3063 }
   3064 
   3065 
   3066 void MacroAssembler::Push(Immediate value) {
   3067   if (kPointerSize == kInt64Size) {
   3068     pushq(value);
   3069   } else {
   3070     leal(rsp, Operand(rsp, -4));
   3071     movp(Operand(rsp, 0), value);
   3072   }
   3073 }
   3074 
   3075 
   3076 void MacroAssembler::PushImm32(int32_t imm32) {
   3077   if (kPointerSize == kInt64Size) {
   3078     pushq_imm32(imm32);
   3079   } else {
   3080     leal(rsp, Operand(rsp, -4));
   3081     movp(Operand(rsp, 0), Immediate(imm32));
   3082   }
   3083 }
   3084 
   3085 
   3086 void MacroAssembler::Pop(Register dst) {
   3087   if (kPointerSize == kInt64Size) {
   3088     popq(dst);
   3089   } else {
   3090     // x32 uses 64-bit pop for rbp in the epilogue.
   3091     DCHECK(dst.code() != rbp.code());
   3092     movp(dst, Operand(rsp, 0));
   3093     leal(rsp, Operand(rsp, 4));
   3094   }
   3095 }
   3096 
   3097 
   3098 void MacroAssembler::Pop(const Operand& dst) {
   3099   if (kPointerSize == kInt64Size) {
   3100     popq(dst);
   3101   } else {
   3102     Register scratch = dst.AddressUsesRegister(kScratchRegister)
   3103         ? kRootRegister : kScratchRegister;
   3104     movp(scratch, Operand(rsp, 0));
   3105     movp(dst, scratch);
   3106     leal(rsp, Operand(rsp, 4));
   3107     if (scratch.is(kRootRegister)) {
   3108       // Restore kRootRegister.
   3109       InitializeRootRegister();
   3110     }
   3111   }
   3112 }
   3113 
   3114 
   3115 void MacroAssembler::PopQuad(const Operand& dst) {
   3116   if (kPointerSize == kInt64Size) {
   3117     popq(dst);
   3118   } else {
   3119     popq(kScratchRegister);
   3120     movp(dst, kScratchRegister);
   3121   }
   3122 }
   3123 
   3124 
   3125 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
   3126                                                         Register base,
   3127                                                         int offset) {
   3128   DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
   3129          offset <= SharedFunctionInfo::kSize &&
   3130          (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
   3131   if (kPointerSize == kInt64Size) {
   3132     movsxlq(dst, FieldOperand(base, offset));
   3133   } else {
   3134     movp(dst, FieldOperand(base, offset));
   3135     SmiToInteger32(dst, dst);
   3136   }
   3137 }
   3138 
   3139 
   3140 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
   3141                                                            int offset,
   3142                                                            int bits) {
   3143   DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
   3144          offset <= SharedFunctionInfo::kSize &&
   3145          (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
   3146   if (kPointerSize == kInt32Size) {
    3147     // On x32, this field is represented as a Smi.
   3148     bits += kSmiShift;
   3149   }
   3150   int byte_offset = bits / kBitsPerByte;
   3151   int bit_in_byte = bits & (kBitsPerByte - 1);
   3152   testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
   3153 }
   3154 
   3155 
   3156 void MacroAssembler::Jump(ExternalReference ext) {
   3157   LoadAddress(kScratchRegister, ext);
   3158   jmp(kScratchRegister);
   3159 }
   3160 
   3161 
   3162 void MacroAssembler::Jump(const Operand& op) {
   3163   if (kPointerSize == kInt64Size) {
   3164     jmp(op);
   3165   } else {
   3166     movp(kScratchRegister, op);
   3167     jmp(kScratchRegister);
   3168   }
   3169 }
   3170 
   3171 
   3172 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
   3173   Move(kScratchRegister, destination, rmode);
   3174   jmp(kScratchRegister);
   3175 }
   3176 
   3177 
   3178 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   3179   // TODO(X64): Inline this
   3180   jmp(code_object, rmode);
   3181 }
   3182 
   3183 
   3184 int MacroAssembler::CallSize(ExternalReference ext) {
    3185   // Opcode for call kScratchRegister is: Rex.B FF D2 (three bytes).
   3186   return LoadAddressSize(ext) +
   3187          Assembler::kCallScratchRegisterInstructionLength;
   3188 }
   3189 
   3190 
   3191 void MacroAssembler::Call(ExternalReference ext) {
   3192 #ifdef DEBUG
   3193   int end_position = pc_offset() + CallSize(ext);
   3194 #endif
   3195   LoadAddress(kScratchRegister, ext);
   3196   call(kScratchRegister);
   3197 #ifdef DEBUG
   3198   CHECK_EQ(end_position, pc_offset());
   3199 #endif
   3200 }
   3201 
   3202 
   3203 void MacroAssembler::Call(const Operand& op) {
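           // Avoid a memory-operand call when the ATOM feature is detected: load the
           // target into kScratchRegister and call through the register instead.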
   3204   if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
   3205     call(op);
   3206   } else {
   3207     movp(kScratchRegister, op);
   3208     call(kScratchRegister);
   3209   }
   3210 }
   3211 
   3212 
   3213 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
   3214 #ifdef DEBUG
   3215   int end_position = pc_offset() + CallSize(destination);
   3216 #endif
   3217   Move(kScratchRegister, destination, rmode);
   3218   call(kScratchRegister);
   3219 #ifdef DEBUG
   3220   CHECK_EQ(pc_offset(), end_position);
   3221 #endif
   3222 }
   3223 
   3224 
   3225 void MacroAssembler::Call(Handle<Code> code_object,
   3226                           RelocInfo::Mode rmode,
   3227                           TypeFeedbackId ast_id) {
   3228 #ifdef DEBUG
   3229   int end_position = pc_offset() + CallSize(code_object);
   3230 #endif
   3231   DCHECK(RelocInfo::IsCodeTarget(rmode) ||
   3232       rmode == RelocInfo::CODE_AGE_SEQUENCE);
   3233   call(code_object, rmode, ast_id);
   3234 #ifdef DEBUG
   3235   CHECK_EQ(end_position, pc_offset());
   3236 #endif
   3237 }
   3238 
   3239 
   3240 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
   3241   if (imm8 == 0) {
   3242     Movd(dst, src);
   3243     return;
   3244   }
   3245   DCHECK_EQ(1, imm8);
   3246   if (CpuFeatures::IsSupported(SSE4_1)) {
   3247     CpuFeatureScope sse_scope(this, SSE4_1);
   3248     pextrd(dst, src, imm8);
   3249     return;
   3250   }
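           // Without SSE4.1: copy the low 64 bits of the XMM register into dst and
           // shift element 1 (the upper 32 bits) down into the low word.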
   3251   movq(dst, src);
   3252   shrq(dst, Immediate(32));
   3253 }
   3254 
   3255 
   3256 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
   3257   if (CpuFeatures::IsSupported(SSE4_1)) {
   3258     CpuFeatureScope sse_scope(this, SSE4_1);
   3259     pinsrd(dst, src, imm8);
   3260     return;
   3261   }
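           // Without SSE4.1: materialize src in kScratchDoubleReg, then either
           // interleave it into lane 1 (imm8 == 1) or overwrite lane 0 (imm8 == 0).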
   3262   Movd(kScratchDoubleReg, src);
   3263   if (imm8 == 1) {
   3264     punpckldq(dst, kScratchDoubleReg);
   3265   } else {
   3266     DCHECK_EQ(0, imm8);
   3267     Movss(dst, kScratchDoubleReg);
   3268   }
   3269 }
   3270 
   3271 
   3272 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
   3273   DCHECK(imm8 == 0 || imm8 == 1);
   3274   if (CpuFeatures::IsSupported(SSE4_1)) {
   3275     CpuFeatureScope sse_scope(this, SSE4_1);
   3276     pinsrd(dst, src, imm8);
   3277     return;
   3278   }
   3279   Movd(kScratchDoubleReg, src);
   3280   if (imm8 == 1) {
   3281     punpckldq(dst, kScratchDoubleReg);
   3282   } else {
   3283     DCHECK_EQ(0, imm8);
   3284     Movss(dst, kScratchDoubleReg);
   3285   }
   3286 }
   3287 
   3288 
   3289 void MacroAssembler::Lzcntl(Register dst, Register src) {
   3290   if (CpuFeatures::IsSupported(LZCNT)) {
   3291     CpuFeatureScope scope(this, LZCNT);
   3292     lzcntl(dst, src);
   3293     return;
   3294   }
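           // Fallback without LZCNT: bsrl yields the index of the highest set bit
           // (undefined when src is zero, hence the special case), and 31 - index is
           // the number of leading zeros.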
   3295   Label not_zero_src;
   3296   bsrl(dst, src);
   3297   j(not_zero, &not_zero_src, Label::kNear);
   3298   Set(dst, 63);  // 63^31 == 32
   3299   bind(&not_zero_src);
   3300   xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
   3301 }
   3302 
   3303 
   3304 void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
   3305   if (CpuFeatures::IsSupported(LZCNT)) {
   3306     CpuFeatureScope scope(this, LZCNT);
   3307     lzcntl(dst, src);
   3308     return;
   3309   }
   3310   Label not_zero_src;
   3311   bsrl(dst, src);
   3312   j(not_zero, &not_zero_src, Label::kNear);
   3313   Set(dst, 63);  // 63^31 == 32
   3314   bind(&not_zero_src);
   3315   xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
   3316 }
   3317 
   3318 
   3319 void MacroAssembler::Lzcntq(Register dst, Register src) {
   3320   if (CpuFeatures::IsSupported(LZCNT)) {
   3321     CpuFeatureScope scope(this, LZCNT);
   3322     lzcntq(dst, src);
   3323     return;
   3324   }
   3325   Label not_zero_src;
   3326   bsrq(dst, src);
   3327   j(not_zero, &not_zero_src, Label::kNear);
   3328   Set(dst, 127);  // 127^63 == 64
   3329   bind(&not_zero_src);
   3330   xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
   3331 }
   3332 
   3333 
   3334 void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
   3335   if (CpuFeatures::IsSupported(LZCNT)) {
   3336     CpuFeatureScope scope(this, LZCNT);
   3337     lzcntq(dst, src);
   3338     return;
   3339   }
   3340   Label not_zero_src;
   3341   bsrq(dst, src);
   3342   j(not_zero, &not_zero_src, Label::kNear);
   3343   Set(dst, 127);  // 127^63 == 64
   3344   bind(&not_zero_src);
   3345   xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
   3346 }
   3347 
   3348 
   3349 void MacroAssembler::Tzcntq(Register dst, Register src) {
   3350   if (CpuFeatures::IsSupported(BMI1)) {
   3351     CpuFeatureScope scope(this, BMI1);
   3352     tzcntq(dst, src);
   3353     return;
   3354   }
   3355   Label not_zero_src;
   3356   bsfq(dst, src);
   3357   j(not_zero, &not_zero_src, Label::kNear);
   3358   // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
   3359   Set(dst, 64);
   3360   bind(&not_zero_src);
   3361 }
   3362 
   3363 
   3364 void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
   3365   if (CpuFeatures::IsSupported(BMI1)) {
   3366     CpuFeatureScope scope(this, BMI1);
   3367     tzcntq(dst, src);
   3368     return;
   3369   }
   3370   Label not_zero_src;
   3371   bsfq(dst, src);
   3372   j(not_zero, &not_zero_src, Label::kNear);
   3373   // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
   3374   Set(dst, 64);
   3375   bind(&not_zero_src);
   3376 }
   3377 
   3378 
   3379 void MacroAssembler::Tzcntl(Register dst, Register src) {
   3380   if (CpuFeatures::IsSupported(BMI1)) {
   3381     CpuFeatureScope scope(this, BMI1);
   3382     tzcntl(dst, src);
   3383     return;
   3384   }
   3385   Label not_zero_src;
   3386   bsfl(dst, src);
   3387   j(not_zero, &not_zero_src, Label::kNear);
   3388   Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
   3389   bind(&not_zero_src);
   3390 }
   3391 
   3392 
   3393 void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
   3394   if (CpuFeatures::IsSupported(BMI1)) {
   3395     CpuFeatureScope scope(this, BMI1);
   3396     tzcntl(dst, src);
   3397     return;
   3398   }
   3399   Label not_zero_src;
   3400   bsfl(dst, src);
   3401   j(not_zero, &not_zero_src, Label::kNear);
   3402   Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
   3403   bind(&not_zero_src);
   3404 }
   3405 
   3406 
   3407 void MacroAssembler::Popcntl(Register dst, Register src) {
   3408   if (CpuFeatures::IsSupported(POPCNT)) {
   3409     CpuFeatureScope scope(this, POPCNT);
   3410     popcntl(dst, src);
   3411     return;
   3412   }
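           // popcntl has no software fallback here; UNREACHABLE() fires if POPCNT is
           // not supported, so callers are expected to check the feature first.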
   3413   UNREACHABLE();
   3414 }
   3415 
   3416 
   3417 void MacroAssembler::Popcntl(Register dst, const Operand& src) {
   3418   if (CpuFeatures::IsSupported(POPCNT)) {
   3419     CpuFeatureScope scope(this, POPCNT);
   3420     popcntl(dst, src);
   3421     return;
   3422   }
   3423   UNREACHABLE();
   3424 }
   3425 
   3426 
   3427 void MacroAssembler::Popcntq(Register dst, Register src) {
   3428   if (CpuFeatures::IsSupported(POPCNT)) {
   3429     CpuFeatureScope scope(this, POPCNT);
   3430     popcntq(dst, src);
   3431     return;
   3432   }
   3433   UNREACHABLE();
   3434 }
   3435 
   3436 
   3437 void MacroAssembler::Popcntq(Register dst, const Operand& src) {
   3438   if (CpuFeatures::IsSupported(POPCNT)) {
   3439     CpuFeatureScope scope(this, POPCNT);
   3440     popcntq(dst, src);
   3441     return;
   3442   }
   3443   UNREACHABLE();
   3444 }
   3445 
   3446 
   3447 void MacroAssembler::Pushad() {
   3448   Push(rax);
   3449   Push(rcx);
   3450   Push(rdx);
   3451   Push(rbx);
   3452   // Not pushing rsp or rbp.
   3453   Push(rsi);
   3454   Push(rdi);
   3455   Push(r8);
   3456   Push(r9);
   3457   // r10 is kScratchRegister.
   3458   Push(r11);
   3459   Push(r12);
   3460   // r13 is kRootRegister.
   3461   Push(r14);
   3462   Push(r15);
   3463   STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
   3464   // Use lea for symmetry with Popad.
   3465   int sp_delta =
   3466       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
   3467   leap(rsp, Operand(rsp, -sp_delta));
   3468 }
   3469 
   3470 
   3471 void MacroAssembler::Popad() {
   3472   // Popad must not change the flags, so use lea instead of addq.
   3473   int sp_delta =
   3474       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
   3475   leap(rsp, Operand(rsp, sp_delta));
   3476   Pop(r15);
   3477   Pop(r14);
   3478   Pop(r12);
   3479   Pop(r11);
   3480   Pop(r9);
   3481   Pop(r8);
   3482   Pop(rdi);
   3483   Pop(rsi);
   3484   Pop(rbx);
   3485   Pop(rdx);
   3486   Pop(rcx);
   3487   Pop(rax);
   3488 }
   3489 
   3490 
   3491 void MacroAssembler::Dropad() {
   3492   addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
   3493 }
   3494 
   3495 
   3496 // Order general registers are pushed by Pushad:
    3497 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
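         // Index of each register in the Pushad order above; -1 marks registers that
         // are not pushed (rsp, rbp, r10 == kScratchRegister, r13 == kRootRegister).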
   3498 const int
   3499 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
   3500     0,
   3501     1,
   3502     2,
   3503     3,
   3504     -1,
   3505     -1,
   3506     4,
   3507     5,
   3508     6,
   3509     7,
   3510     -1,
   3511     8,
   3512     9,
   3513     -1,
   3514     10,
   3515     11
   3516 };
   3517 
   3518 
   3519 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
   3520                                                   const Immediate& imm) {
   3521   movp(SafepointRegisterSlot(dst), imm);
   3522 }
   3523 
   3524 
   3525 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
   3526   movp(SafepointRegisterSlot(dst), src);
   3527 }
   3528 
   3529 
   3530 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
   3531   movp(dst, SafepointRegisterSlot(src));
   3532 }
   3533 
   3534 
   3535 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
   3536   return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
   3537 }
   3538 
   3539 
   3540 void MacroAssembler::PushStackHandler() {
   3541   // Adjust this code if not the case.
   3542   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
   3543   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   3544 
   3545   // Link the current handler as the next handler.
   3546   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   3547   Push(ExternalOperand(handler_address));
   3548 
   3549   // Set this new handler as the current one.
   3550   movp(ExternalOperand(handler_address), rsp);
   3551 }
   3552 
   3553 
   3554 void MacroAssembler::PopStackHandler() {
   3555   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   3556   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   3557   Pop(ExternalOperand(handler_address));
   3558   addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
   3559 }
   3560 
   3561 
   3562 void MacroAssembler::Ret() {
   3563   ret(0);
   3564 }
   3565 
   3566 
   3567 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
   3568   if (is_uint16(bytes_dropped)) {
   3569     ret(bytes_dropped);
   3570   } else {
   3571     PopReturnAddressTo(scratch);
   3572     addp(rsp, Immediate(bytes_dropped));
   3573     PushReturnAddressFrom(scratch);
   3574     ret(0);
   3575   }
   3576 }
   3577 
   3578 
   3579 void MacroAssembler::FCmp() {
   3580   fucomip();
   3581   fstp(0);
   3582 }
   3583 
   3584 
   3585 void MacroAssembler::CmpObjectType(Register heap_object,
   3586                                    InstanceType type,
   3587                                    Register map) {
   3588   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   3589   CmpInstanceType(map, type);
   3590 }
   3591 
   3592 
   3593 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
   3594   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
   3595        Immediate(static_cast<int8_t>(type)));
   3596 }
   3597 
   3598 
   3599 void MacroAssembler::CheckFastElements(Register map,
   3600                                        Label* fail,
   3601                                        Label::Distance distance) {
   3602   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   3603   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   3604   STATIC_ASSERT(FAST_ELEMENTS == 2);
   3605   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   3606   cmpb(FieldOperand(map, Map::kBitField2Offset),
   3607        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   3608   j(above, fail, distance);
   3609 }
   3610 
   3611 
   3612 void MacroAssembler::CheckFastObjectElements(Register map,
   3613                                              Label* fail,
   3614                                              Label::Distance distance) {
   3615   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   3616   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   3617   STATIC_ASSERT(FAST_ELEMENTS == 2);
   3618   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   3619   cmpb(FieldOperand(map, Map::kBitField2Offset),
   3620        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   3621   j(below_equal, fail, distance);
   3622   cmpb(FieldOperand(map, Map::kBitField2Offset),
   3623        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   3624   j(above, fail, distance);
   3625 }
   3626 
   3627 
   3628 void MacroAssembler::CheckFastSmiElements(Register map,
   3629                                           Label* fail,
   3630                                           Label::Distance distance) {
   3631   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   3632   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   3633   cmpb(FieldOperand(map, Map::kBitField2Offset),
   3634        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   3635   j(above, fail, distance);
   3636 }
   3637 
   3638 
   3639 void MacroAssembler::StoreNumberToDoubleElements(
   3640     Register maybe_number,
   3641     Register elements,
   3642     Register index,
   3643     XMMRegister xmm_scratch,
   3644     Label* fail,
   3645     int elements_offset) {
   3646   Label smi_value, done;
   3647 
   3648   JumpIfSmi(maybe_number, &smi_value, Label::kNear);
   3649 
   3650   CheckMap(maybe_number,
   3651            isolate()->factory()->heap_number_map(),
   3652            fail,
   3653            DONT_DO_SMI_CHECK);
   3654 
   3655   // Double value, turn potential sNaN into qNaN.
   3656   Move(xmm_scratch, 1.0);
   3657   mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
   3658   jmp(&done, Label::kNear);
   3659 
   3660   bind(&smi_value);
    3661   // Value is a smi. Convert it to a double and store.
   3662   // Preserve original value.
   3663   SmiToInteger32(kScratchRegister, maybe_number);
   3664   Cvtlsi2sd(xmm_scratch, kScratchRegister);
   3665   bind(&done);
   3666   Movsd(FieldOperand(elements, index, times_8,
   3667                      FixedDoubleArray::kHeaderSize - elements_offset),
   3668         xmm_scratch);
   3669 }
   3670 
   3671 
   3672 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   3673   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   3674 }
   3675 
   3676 
   3677 void MacroAssembler::CheckMap(Register obj,
   3678                               Handle<Map> map,
   3679                               Label* fail,
   3680                               SmiCheckType smi_check_type) {
   3681   if (smi_check_type == DO_SMI_CHECK) {
   3682     JumpIfSmi(obj, fail);
   3683   }
   3684 
   3685   CompareMap(obj, map);
   3686   j(not_equal, fail);
   3687 }
   3688 
   3689 
   3690 void MacroAssembler::ClampUint8(Register reg) {
   3691   Label done;
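           // Values already in [0, 255] have no bits set above bit 7 and are left
           // unchanged.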
   3692   testl(reg, Immediate(0xFFFFFF00));
   3693   j(zero, &done, Label::kNear);
   3694   setcc(negative, reg);  // 1 if negative, 0 if positive.
   3695   decb(reg);  // 0 if negative, 255 if positive.
   3696   bind(&done);
   3697 }
   3698 
   3699 
   3700 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
   3701                                         XMMRegister temp_xmm_reg,
   3702                                         Register result_reg) {
   3703   Label done;
   3704   Label conv_failure;
   3705   Xorpd(temp_xmm_reg, temp_xmm_reg);
   3706   Cvtsd2si(result_reg, input_reg);
   3707   testl(result_reg, Immediate(0xFFFFFF00));
   3708   j(zero, &done, Label::kNear);
   3709   cmpl(result_reg, Immediate(1));
   3710   j(overflow, &conv_failure, Label::kNear);
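           // Branchless clamp: the flags still hold (result - 1) from the cmpl above,
           // so setcc(sign) yields 1 for negative inputs and 0 for inputs above 255;
           // the subl/andl below turn that into 0 or 255 respectively.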
   3711   movl(result_reg, Immediate(0));
   3712   setcc(sign, result_reg);
   3713   subl(result_reg, Immediate(1));
   3714   andl(result_reg, Immediate(255));
   3715   jmp(&done, Label::kNear);
   3716   bind(&conv_failure);
   3717   Set(result_reg, 0);
   3718   Ucomisd(input_reg, temp_xmm_reg);
   3719   j(below, &done, Label::kNear);
   3720   Set(result_reg, 255);
   3721   bind(&done);
   3722 }
   3723 
   3724 
   3725 void MacroAssembler::LoadUint32(XMMRegister dst,
   3726                                 Register src) {
   3727   if (FLAG_debug_code) {
   3728     cmpq(src, Immediate(0xffffffff));
   3729     Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
   3730   }
   3731   Cvtqsi2sd(dst, src);
   3732 }
   3733 
   3734 
   3735 void MacroAssembler::SlowTruncateToI(Register result_reg,
   3736                                      Register input_reg,
   3737                                      int offset) {
   3738   DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
   3739   call(stub.GetCode(), RelocInfo::CODE_TARGET);
   3740 }
   3741 
   3742 
   3743 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
   3744                                            Register input_reg) {
   3745   Label done;
   3746   Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset));
   3747   Cvttsd2siq(result_reg, kScratchDoubleReg);
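           // Cvttsd2siq produces 0x8000000000000000 when the value does not fit in an
           // int64; only that sentinel sets the overflow flag when compared with 1,
           // so no_overflow means the fast conversion succeeded.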
   3748   cmpq(result_reg, Immediate(1));
   3749   j(no_overflow, &done, Label::kNear);
   3750 
   3751   // Slow case.
   3752   if (input_reg.is(result_reg)) {
   3753     subp(rsp, Immediate(kDoubleSize));
   3754     Movsd(MemOperand(rsp, 0), kScratchDoubleReg);
   3755     SlowTruncateToI(result_reg, rsp, 0);
   3756     addp(rsp, Immediate(kDoubleSize));
   3757   } else {
   3758     SlowTruncateToI(result_reg, input_reg);
   3759   }
   3760 
   3761   bind(&done);
   3762   // Keep our invariant that the upper 32 bits are zero.
   3763   movl(result_reg, result_reg);
   3764 }
   3765 
   3766 
   3767 void MacroAssembler::TruncateDoubleToI(Register result_reg,
   3768                                        XMMRegister input_reg) {
   3769   Label done;
   3770   Cvttsd2siq(result_reg, input_reg);
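           // Cvttsd2siq produces 0x8000000000000000 on overflow; only that sentinel
           // sets the overflow flag when compared with 1, so no_overflow means the
           // conversion succeeded.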
   3771   cmpq(result_reg, Immediate(1));
   3772   j(no_overflow, &done, Label::kNear);
   3773 
   3774   subp(rsp, Immediate(kDoubleSize));
   3775   Movsd(MemOperand(rsp, 0), input_reg);
   3776   SlowTruncateToI(result_reg, rsp, 0);
   3777   addp(rsp, Immediate(kDoubleSize));
   3778 
   3779   bind(&done);
   3780   // Keep our invariant that the upper 32 bits are zero.
   3781   movl(result_reg, result_reg);
   3782 }
   3783 
   3784 
   3785 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
   3786                                XMMRegister scratch,
   3787                                MinusZeroMode minus_zero_mode,
   3788                                Label* lost_precision, Label* is_nan,
   3789                                Label* minus_zero, Label::Distance dst) {
   3790   Cvttsd2si(result_reg, input_reg);
   3791   Cvtlsi2sd(kScratchDoubleReg, result_reg);
   3792   Ucomisd(kScratchDoubleReg, input_reg);
   3793   j(not_equal, lost_precision, dst);
   3794   j(parity_even, is_nan, dst);  // NaN.
   3795   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
   3796     Label done;
   3797     // The integer converted back is equal to the original. We
   3798     // only have to test if we got -0 as an input.
   3799     testl(result_reg, result_reg);
   3800     j(not_zero, &done, Label::kNear);
   3801     Movmskpd(result_reg, input_reg);
   3802     // Bit 0 contains the sign of the double in input_reg.
   3803     // If input was positive, we are ok and return 0, otherwise
   3804     // jump to minus_zero.
   3805     andl(result_reg, Immediate(1));
   3806     j(not_zero, minus_zero, dst);
   3807     bind(&done);
   3808   }
   3809 }
   3810 
   3811 
   3812 void MacroAssembler::LoadInstanceDescriptors(Register map,
   3813                                              Register descriptors) {
   3814   movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
   3815 }
   3816 
   3817 
   3818 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
   3819   movl(dst, FieldOperand(map, Map::kBitField3Offset));
   3820   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
   3821 }
   3822 
   3823 
   3824 void MacroAssembler::EnumLength(Register dst, Register map) {
   3825   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
   3826   movl(dst, FieldOperand(map, Map::kBitField3Offset));
   3827   andl(dst, Immediate(Map::EnumLengthBits::kMask));
   3828   Integer32ToSmi(dst, dst);
   3829 }
   3830 
   3831 
   3832 void MacroAssembler::LoadAccessor(Register dst, Register holder,
   3833                                   int accessor_index,
   3834                                   AccessorComponent accessor) {
   3835   movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
   3836   LoadInstanceDescriptors(dst, dst);
   3837   movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
   3838   int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
   3839                                            : AccessorPair::kSetterOffset;
   3840   movp(dst, FieldOperand(dst, offset));
   3841 }
   3842 
   3843 
   3844 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
   3845                                      Register scratch2, Handle<WeakCell> cell,
   3846                                      Handle<Code> success,
   3847                                      SmiCheckType smi_check_type) {
   3848   Label fail;
   3849   if (smi_check_type == DO_SMI_CHECK) {
   3850     JumpIfSmi(obj, &fail);
   3851   }
   3852   movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
   3853   CmpWeakValue(scratch1, cell, scratch2);
   3854   j(equal, success, RelocInfo::CODE_TARGET);
   3855   bind(&fail);
   3856 }
   3857 
   3858 
   3859 void MacroAssembler::AssertNumber(Register object) {
   3860   if (emit_debug_code()) {
   3861     Label ok;
   3862     Condition is_smi = CheckSmi(object);
   3863     j(is_smi, &ok, Label::kNear);
   3864     Cmp(FieldOperand(object, HeapObject::kMapOffset),
   3865         isolate()->factory()->heap_number_map());
   3866     Check(equal, kOperandIsNotANumber);
   3867     bind(&ok);
   3868   }
   3869 }
   3870 
   3871 void MacroAssembler::AssertNotNumber(Register object) {
   3872   if (emit_debug_code()) {
   3873     Condition is_smi = CheckSmi(object);
   3874     Check(NegateCondition(is_smi), kOperandIsANumber);
   3875     Cmp(FieldOperand(object, HeapObject::kMapOffset),
   3876         isolate()->factory()->heap_number_map());
   3877     Check(not_equal, kOperandIsANumber);
   3878   }
   3879 }
   3880 
   3881 void MacroAssembler::AssertNotSmi(Register object) {
   3882   if (emit_debug_code()) {
   3883     Condition is_smi = CheckSmi(object);
   3884     Check(NegateCondition(is_smi), kOperandIsASmi);
   3885   }
   3886 }
   3887 
   3888 
   3889 void MacroAssembler::AssertSmi(Register object) {
   3890   if (emit_debug_code()) {
   3891     Condition is_smi = CheckSmi(object);
   3892     Check(is_smi, kOperandIsNotASmi);
   3893   }
   3894 }
   3895 
   3896 
   3897 void MacroAssembler::AssertSmi(const Operand& object) {
   3898   if (emit_debug_code()) {
   3899     Condition is_smi = CheckSmi(object);
   3900     Check(is_smi, kOperandIsNotASmi);
   3901   }
   3902 }
   3903 
   3904 
   3905 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   3906   if (emit_debug_code()) {
   3907     DCHECK(!int32_register.is(kScratchRegister));
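             // Any correctly zero-extended 32-bit value is strictly below 2^32, the
             // constant loaded into kScratchRegister.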
   3908     movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
   3909     cmpq(kScratchRegister, int32_register);
   3910     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
   3911   }
   3912 }
   3913 
   3914 
   3915 void MacroAssembler::AssertString(Register object) {
   3916   if (emit_debug_code()) {
   3917     testb(object, Immediate(kSmiTagMask));
   3918     Check(not_equal, kOperandIsASmiAndNotAString);
   3919     Push(object);
   3920     movp(object, FieldOperand(object, HeapObject::kMapOffset));
   3921     CmpInstanceType(object, FIRST_NONSTRING_TYPE);
   3922     Pop(object);
   3923     Check(below, kOperandIsNotAString);
   3924   }
   3925 }
   3926 
   3927 
   3928 void MacroAssembler::AssertName(Register object) {
   3929   if (emit_debug_code()) {
   3930     testb(object, Immediate(kSmiTagMask));
   3931     Check(not_equal, kOperandIsASmiAndNotAName);
   3932     Push(object);
   3933     movp(object, FieldOperand(object, HeapObject::kMapOffset));
   3934     CmpInstanceType(object, LAST_NAME_TYPE);
   3935     Pop(object);
   3936     Check(below_equal, kOperandIsNotAName);
   3937   }
   3938 }
   3939 
   3940 
   3941 void MacroAssembler::AssertFunction(Register object) {
   3942   if (emit_debug_code()) {
   3943     testb(object, Immediate(kSmiTagMask));
   3944     Check(not_equal, kOperandIsASmiAndNotAFunction);
   3945     Push(object);
   3946     CmpObjectType(object, JS_FUNCTION_TYPE, object);
   3947     Pop(object);
   3948     Check(equal, kOperandIsNotAFunction);
   3949   }
   3950 }
   3951 
   3952 
   3953 void MacroAssembler::AssertBoundFunction(Register object) {
   3954   if (emit_debug_code()) {
   3955     testb(object, Immediate(kSmiTagMask));
   3956     Check(not_equal, kOperandIsASmiAndNotABoundFunction);
   3957     Push(object);
   3958     CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
   3959     Pop(object);
   3960     Check(equal, kOperandIsNotABoundFunction);
   3961   }
   3962 }
   3963 
   3964 void MacroAssembler::AssertGeneratorObject(Register object) {
   3965   if (emit_debug_code()) {
   3966     testb(object, Immediate(kSmiTagMask));
   3967     Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
   3968     Push(object);
   3969     CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
   3970     Pop(object);
   3971     Check(equal, kOperandIsNotAGeneratorObject);
   3972   }
   3973 }
   3974 
   3975 void MacroAssembler::AssertReceiver(Register object) {
   3976   if (emit_debug_code()) {
   3977     testb(object, Immediate(kSmiTagMask));
   3978     Check(not_equal, kOperandIsASmiAndNotAReceiver);
   3979     Push(object);
   3980     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   3981     CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
   3982     Pop(object);
   3983     Check(above_equal, kOperandIsNotAReceiver);
   3984   }
   3985 }
   3986 
   3987 
   3988 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
   3989   if (emit_debug_code()) {
   3990     Label done_checking;
   3991     AssertNotSmi(object);
   3992     Cmp(object, isolate()->factory()->undefined_value());
   3993     j(equal, &done_checking);
   3994     Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
   3995     Assert(equal, kExpectedUndefinedOrCell);
   3996     bind(&done_checking);
   3997   }
   3998 }
   3999 
   4000 
   4001 void MacroAssembler::AssertRootValue(Register src,
   4002                                      Heap::RootListIndex root_value_index,
   4003                                      BailoutReason reason) {
   4004   if (emit_debug_code()) {
   4005     DCHECK(!src.is(kScratchRegister));
   4006     LoadRoot(kScratchRegister, root_value_index);
   4007     cmpp(src, kScratchRegister);
   4008     Check(equal, reason);
   4009   }
   4010 }
   4011 
   4012 
   4013 
   4014 Condition MacroAssembler::IsObjectStringType(Register heap_object,
   4015                                              Register map,
   4016                                              Register instance_type) {
   4017   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   4018   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
   4019   STATIC_ASSERT(kNotStringTag != 0);
   4020   testb(instance_type, Immediate(kIsNotStringMask));
   4021   return zero;
   4022 }
   4023 
   4024 
   4025 Condition MacroAssembler::IsObjectNameType(Register heap_object,
   4026                                            Register map,
   4027                                            Register instance_type) {
   4028   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   4029   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
   4030   cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
   4031   return below_equal;
   4032 }
   4033 
   4034 
   4035 void MacroAssembler::GetMapConstructor(Register result, Register map,
   4036                                        Register temp) {
   4037   Label done, loop;
   4038   movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
   4039   bind(&loop);
   4040   JumpIfSmi(result, &done, Label::kNear);
   4041   CmpObjectType(result, MAP_TYPE, temp);
   4042   j(not_equal, &done, Label::kNear);
   4043   movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
   4044   jmp(&loop);
   4045   bind(&done);
   4046 }
   4047 
   4048 
   4049 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
   4050                                              Label* miss) {
   4051   // Get the prototype or initial map from the function.
   4052   movp(result,
   4053        FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   4054 
   4055   // If the prototype or initial map is the hole, don't return it and
   4056   // simply miss the cache instead. This will allow us to allocate a
   4057   // prototype object on-demand in the runtime system.
   4058   CompareRoot(result, Heap::kTheHoleValueRootIndex);
   4059   j(equal, miss);
   4060 
   4061   // If the function does not have an initial map, we're done.
   4062   Label done;
   4063   CmpObjectType(result, MAP_TYPE, kScratchRegister);
   4064   j(not_equal, &done, Label::kNear);
   4065 
   4066   // Get the prototype from the initial map.
   4067   movp(result, FieldOperand(result, Map::kPrototypeOffset));
   4068 
   4069   // All done.
   4070   bind(&done);
   4071 }
   4072 
   4073 
   4074 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   4075   if (FLAG_native_code_counters && counter->Enabled()) {
   4076     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   4077     movl(counter_operand, Immediate(value));
   4078   }
   4079 }
   4080 
   4081 
   4082 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
   4083   DCHECK(value > 0);
   4084   if (FLAG_native_code_counters && counter->Enabled()) {
   4085     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   4086     if (value == 1) {
   4087       incl(counter_operand);
   4088     } else {
   4089       addl(counter_operand, Immediate(value));
   4090     }
   4091   }
   4092 }
   4093 
   4094 
   4095 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
   4096   DCHECK(value > 0);
   4097   if (FLAG_native_code_counters && counter->Enabled()) {
   4098     Operand counter_operand = ExternalOperand(ExternalReference(counter));
   4099     if (value == 1) {
   4100       decl(counter_operand);
   4101     } else {
   4102       subl(counter_operand, Immediate(value));
   4103     }
   4104   }
   4105 }
   4106 
   4107 
   4108 void MacroAssembler::DebugBreak() {
   4109   Set(rax, 0);  // No arguments.
   4110   LoadAddress(rbx,
   4111               ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
   4112   CEntryStub ces(isolate(), 1);
   4113   DCHECK(AllowThisStubCall(&ces));
   4114   Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
   4115 }
   4116 
   4117 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
   4118                                         Register caller_args_count_reg,
   4119                                         Register scratch0, Register scratch1,
   4120                                         ReturnAddressState ra_state) {
   4121 #if DEBUG
   4122   if (callee_args_count.is_reg()) {
   4123     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
   4124                        scratch1));
   4125   } else {
   4126     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
   4127   }
   4128 #endif
   4129 
   4130   // Calculate the destination address where we will put the return address
    4131   // after we drop the current frame.
   4132   Register new_sp_reg = scratch0;
   4133   if (callee_args_count.is_reg()) {
   4134     subp(caller_args_count_reg, callee_args_count.reg());
   4135     leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
   4136                              StandardFrameConstants::kCallerPCOffset));
   4137   } else {
   4138     leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
   4139                              StandardFrameConstants::kCallerPCOffset -
   4140                                  callee_args_count.immediate() * kPointerSize));
   4141   }
   4142 
   4143   if (FLAG_debug_code) {
   4144     cmpp(rsp, new_sp_reg);
   4145     Check(below, kStackAccessBelowStackPointer);
   4146   }
   4147 
    4148   // Copy the return address from the caller's frame into the current frame's
    4149   // return address slot so that it is not trashed, and let the copying loop
    4150   // below move it to the right place.
   4151   Register tmp_reg = scratch1;
   4152   if (ra_state == ReturnAddressState::kOnStack) {
   4153     movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
   4154     movp(Operand(rsp, 0), tmp_reg);
   4155   } else {
   4156     DCHECK(ReturnAddressState::kNotOnStack == ra_state);
   4157     Push(Operand(rbp, StandardFrameConstants::kCallerPCOffset));
   4158   }
   4159 
   4160   // Restore caller's frame pointer now as it could be overwritten by
   4161   // the copying loop.
   4162   movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   4163 
   4164   // +2 here is to copy both receiver and return address.
   4165   Register count_reg = caller_args_count_reg;
   4166   if (callee_args_count.is_reg()) {
   4167     leap(count_reg, Operand(callee_args_count.reg(), 2));
   4168   } else {
   4169     movp(count_reg, Immediate(callee_args_count.immediate() + 2));
   4170     // TODO(ishell): Unroll copying loop for small immediate values.
   4171   }
   4172 
    4173   // Now copy the callee arguments to the caller frame, going backwards to
    4174   // avoid corrupting them (the source and destination areas could overlap).
   4175   Label loop, entry;
   4176   jmp(&entry, Label::kNear);
   4177   bind(&loop);
   4178   decp(count_reg);
   4179   movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
   4180   movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
   4181   bind(&entry);
   4182   cmpp(count_reg, Immediate(0));
   4183   j(not_equal, &loop, Label::kNear);
   4184 
   4185   // Leave current frame.
   4186   movp(rsp, new_sp_reg);
   4187 }
   4188 
   4189 void MacroAssembler::InvokeFunction(Register function,
   4190                                     Register new_target,
   4191                                     const ParameterCount& actual,
   4192                                     InvokeFlag flag,
   4193                                     const CallWrapper& call_wrapper) {
   4194   movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   4195   LoadSharedFunctionInfoSpecialField(
   4196       rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
   4197 
   4198   ParameterCount expected(rbx);
   4199   InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
   4200 }
   4201 
   4202 
   4203 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   4204                                     const ParameterCount& expected,
   4205                                     const ParameterCount& actual,
   4206                                     InvokeFlag flag,
   4207                                     const CallWrapper& call_wrapper) {
   4208   Move(rdi, function);
   4209   InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
   4210 }
   4211 
   4212 
   4213 void MacroAssembler::InvokeFunction(Register function,
   4214                                     Register new_target,
   4215                                     const ParameterCount& expected,
   4216                                     const ParameterCount& actual,
   4217                                     InvokeFlag flag,
   4218                                     const CallWrapper& call_wrapper) {
   4219   DCHECK(function.is(rdi));
   4220   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
   4221   InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
   4222 }
   4223 
   4224 
   4225 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   4226                                         const ParameterCount& expected,
   4227                                         const ParameterCount& actual,
   4228                                         InvokeFlag flag,
   4229                                         const CallWrapper& call_wrapper) {
   4230   // You can't call a function without a valid frame.
   4231   DCHECK(flag == JUMP_FUNCTION || has_frame());
   4232   DCHECK(function.is(rdi));
   4233   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
   4234 
   4235   if (call_wrapper.NeedsDebugStepCheck()) {
   4236     FloodFunctionIfStepping(function, new_target, expected, actual);
   4237   }
   4238 
   4239   // Clear the new.target register if not given.
   4240   if (!new_target.is_valid()) {
   4241     LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
   4242   }
   4243 
   4244   Label done;
   4245   bool definitely_mismatches = false;
   4246   InvokePrologue(expected,
   4247                  actual,
   4248                  &done,
   4249                  &definitely_mismatches,
   4250                  flag,
   4251                  Label::kNear,
   4252                  call_wrapper);
   4253   if (!definitely_mismatches) {
   4254     // We call indirectly through the code field in the function to
   4255     // allow recompilation to take effect without changing any of the
   4256     // call sites.
   4257     Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
   4258     if (flag == CALL_FUNCTION) {
   4259       call_wrapper.BeforeCall(CallSize(code));
   4260       call(code);
   4261       call_wrapper.AfterCall();
   4262     } else {
   4263       DCHECK(flag == JUMP_FUNCTION);
   4264       jmp(code);
   4265     }
   4266     bind(&done);
   4267   }
   4268 }
   4269 
   4270 
   4271 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
   4272                                     const ParameterCount& actual,
   4273                                     Label* done,
   4274                                     bool* definitely_mismatches,
   4275                                     InvokeFlag flag,
   4276                                     Label::Distance near_jump,
   4277                                     const CallWrapper& call_wrapper) {
   4278   bool definitely_matches = false;
   4279   *definitely_mismatches = false;
   4280   Label invoke;
   4281   if (expected.is_immediate()) {
   4282     DCHECK(actual.is_immediate());
   4283     Set(rax, actual.immediate());
   4284     if (expected.immediate() == actual.immediate()) {
   4285       definitely_matches = true;
   4286     } else {
   4287       if (expected.immediate() ==
   4288               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
   4289         // Don't worry about adapting arguments for built-ins that
    4290         // don't want that done. Skip adaptation code by making it look
   4291         // like we have a match between expected and actual number of
   4292         // arguments.
   4293         definitely_matches = true;
   4294       } else {
   4295         *definitely_mismatches = true;
   4296         Set(rbx, expected.immediate());
   4297       }
   4298     }
   4299   } else {
   4300     if (actual.is_immediate()) {
   4301       // Expected is in register, actual is immediate. This is the
   4302       // case when we invoke function values without going through the
   4303       // IC mechanism.
   4304       Set(rax, actual.immediate());
   4305       cmpp(expected.reg(), Immediate(actual.immediate()));
   4306       j(equal, &invoke, Label::kNear);
   4307       DCHECK(expected.reg().is(rbx));
   4308     } else if (!expected.reg().is(actual.reg())) {
   4309       // Both expected and actual are in (different) registers. This
   4310       // is the case when we invoke functions using call and apply.
   4311       cmpp(expected.reg(), actual.reg());
   4312       j(equal, &invoke, Label::kNear);
   4313       DCHECK(actual.reg().is(rax));
   4314       DCHECK(expected.reg().is(rbx));
   4315     } else {
   4316       Move(rax, actual.reg());
   4317     }
   4318   }
   4319 
   4320   if (!definitely_matches) {
   4321     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
   4322     if (flag == CALL_FUNCTION) {
   4323       call_wrapper.BeforeCall(CallSize(adaptor));
   4324       Call(adaptor, RelocInfo::CODE_TARGET);
   4325       call_wrapper.AfterCall();
   4326       if (!*definitely_mismatches) {
   4327         jmp(done, near_jump);
   4328       }
   4329     } else {
   4330       Jump(adaptor, RelocInfo::CODE_TARGET);
   4331     }
   4332     bind(&invoke);
   4333   }
   4334 }
   4335 
   4336 
   4337 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
   4338                                              const ParameterCount& expected,
   4339                                              const ParameterCount& actual) {
   4340   Label skip_flooding;
   4341   ExternalReference last_step_action =
   4342       ExternalReference::debug_last_step_action_address(isolate());
   4343   Operand last_step_action_operand = ExternalOperand(last_step_action);
   4344   STATIC_ASSERT(StepFrame > StepIn);
   4345   cmpb(last_step_action_operand, Immediate(StepIn));
   4346   j(less, &skip_flooding);
   4347   {
   4348     FrameScope frame(this,
   4349                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
   4350     if (expected.is_reg()) {
   4351       Integer32ToSmi(expected.reg(), expected.reg());
   4352       Push(expected.reg());
   4353     }
   4354     if (actual.is_reg()) {
   4355       Integer32ToSmi(actual.reg(), actual.reg());
   4356       Push(actual.reg());
   4357     }
   4358     if (new_target.is_valid()) {
   4359       Push(new_target);
   4360     }
   4361     Push(fun);
   4362     Push(fun);
   4363     CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
   4364     Pop(fun);
   4365     if (new_target.is_valid()) {
   4366       Pop(new_target);
   4367     }
   4368     if (actual.is_reg()) {
   4369       Pop(actual.reg());
   4370       SmiToInteger64(actual.reg(), actual.reg());
   4371     }
   4372     if (expected.is_reg()) {
   4373       Pop(expected.reg());
   4374       SmiToInteger64(expected.reg(), expected.reg());
   4375     }
   4376   }
   4377   bind(&skip_flooding);
   4378 }
   4379 
   4380 void MacroAssembler::StubPrologue(StackFrame::Type type) {
   4381   pushq(rbp);  // Caller's frame pointer.
   4382   movp(rbp, rsp);
   4383   Push(Smi::FromInt(type));
   4384 }
   4385 
   4386 void MacroAssembler::Prologue(bool code_pre_aging) {
    4387   PredictableCodeSizeScope predictable_code_size_scope(this,
   4388       kNoCodeAgeSequenceLength);
   4389   if (code_pre_aging) {
    4390     // Pre-age the code.
   4391     Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
   4392          RelocInfo::CODE_AGE_SEQUENCE);
   4393     Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
   4394   } else {
   4395     pushq(rbp);  // Caller's frame pointer.
   4396     movp(rbp, rsp);
   4397     Push(rsi);  // Callee's context.
   4398     Push(rdi);  // Callee's JS function.
   4399   }
   4400 }
   4401 
   4402 
   4403 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   4404   movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   4405   movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
   4406   movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
   4407 }
   4408 
   4409 
   4410 void MacroAssembler::EnterFrame(StackFrame::Type type,
   4411                                 bool load_constant_pool_pointer_reg) {
   4412   // Out-of-line constant pool not implemented on x64.
   4413   UNREACHABLE();
   4414 }
   4415 
   4416 
   4417 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   4418   pushq(rbp);
   4419   movp(rbp, rsp);
   4420   Push(Smi::FromInt(type));
   4421   if (type == StackFrame::INTERNAL) {
   4422     Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   4423     Push(kScratchRegister);
   4424   }
   4425   if (emit_debug_code()) {
   4426     Move(kScratchRegister,
   4427          isolate()->factory()->undefined_value(),
   4428          RelocInfo::EMBEDDED_OBJECT);
   4429     cmpp(Operand(rsp, 0), kScratchRegister);
   4430     Check(not_equal, kCodeObjectNotProperlyPatched);
   4431   }
   4432 }
   4433 
   4434 
   4435 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   4436   if (emit_debug_code()) {
   4437     Move(kScratchRegister, Smi::FromInt(type));
   4438     cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
   4439          kScratchRegister);
   4440     Check(equal, kStackFrameTypesMustMatch);
   4441   }
   4442   movp(rsp, rbp);
   4443   popq(rbp);
   4444 }
   4445 
   4446 
   4447 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
   4448   // Set up the frame structure on the stack.
   4449   // All constants are relative to the frame pointer of the exit frame.
   4450   DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
   4451             ExitFrameConstants::kCallerSPDisplacement);
   4452   DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
   4453   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   4454   pushq(rbp);
   4455   movp(rbp, rsp);
   4456 
   4457   // Reserve room for entry stack pointer and push the code object.
   4458   Push(Smi::FromInt(StackFrame::EXIT));
   4459   DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
   4460   Push(Immediate(0));  // Saved entry sp, patched before call.
   4461   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
    4462   Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
   4463 
   4464   // Save the frame pointer and the context in top.
   4465   if (save_rax) {
    4466     movp(r14, rax);  // Back up rax in a callee-saved register.
   4467   }
   4468 
   4469   Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
   4470   Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
   4471   Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
   4472 }
   4473 
   4474 
   4475 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
   4476                                             bool save_doubles) {
   4477 #ifdef _WIN64
   4478   const int kShadowSpace = 4;
   4479   arg_stack_space += kShadowSpace;
   4480 #endif
   4481   // Optionally save all XMM registers.
   4482   if (save_doubles) {
   4483     int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
   4484                 arg_stack_space * kRegisterSize;
   4485     subp(rsp, Immediate(space));
   4486     int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
   4487     const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   4488     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
   4489       DoubleRegister reg =
   4490           DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
   4491       Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
   4492     }
   4493   } else if (arg_stack_space > 0) {
   4494     subp(rsp, Immediate(arg_stack_space * kRegisterSize));
   4495   }
   4496 
   4497   // Get the required frame alignment for the OS.
   4498   const int kFrameAlignment = base::OS::ActivationFrameAlignment();
   4499   if (kFrameAlignment > 0) {
   4500     DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
   4501     DCHECK(is_int8(kFrameAlignment));
   4502     andp(rsp, Immediate(-kFrameAlignment));
   4503   }
   4504 
   4505   // Patch the saved entry sp.
   4506   movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
   4507 }
   4508 
   4509 
   4510 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   4511   EnterExitFramePrologue(true);
   4512 
   4513   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
   4514   // so it must be retained across the C-call.
   4515   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   4516   leap(r15, Operand(rbp, r14, times_pointer_size, offset));
   4517 
   4518   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
   4519 }
   4520 
   4521 
   4522 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
   4523   EnterExitFramePrologue(false);
   4524   EnterExitFrameEpilogue(arg_stack_space, false);
   4525 }
   4526 
   4527 
   4528 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
   4529   // Registers:
   4530   // r15 : argv
   4531   if (save_doubles) {
   4532     int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
   4533     const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   4534     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
   4535       DoubleRegister reg =
   4536           DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
   4537       Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
   4538     }
   4539   }
   4540 
   4541   if (pop_arguments) {
   4542     // Get the return address from the stack and restore the frame pointer.
   4543     movp(rcx, Operand(rbp, kFPOnStackSize));
   4544     movp(rbp, Operand(rbp, 0 * kPointerSize));
   4545 
   4546     // Drop everything up to and including the arguments and the receiver
   4547     // from the caller stack.
   4548     leap(rsp, Operand(r15, 1 * kPointerSize));
   4549 
   4550     PushReturnAddressFrom(rcx);
   4551   } else {
   4552     // Otherwise just leave the exit frame.
   4553     leave();
   4554   }
   4555 
   4556   LeaveExitFrameEpilogue(true);
   4557 }
   4558 
   4559 
   4560 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
   4561   movp(rsp, rbp);
   4562   popq(rbp);
   4563 
   4564   LeaveExitFrameEpilogue(restore_context);
   4565 }
   4566 
   4567 
   4568 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
   4569   // Restore current context from top and clear it in debug mode.
   4570   ExternalReference context_address(Isolate::kContextAddress, isolate());
   4571   Operand context_operand = ExternalOperand(context_address);
   4572   if (restore_context) {
   4573     movp(rsi, context_operand);
   4574   }
   4575 #ifdef DEBUG
   4576   movp(context_operand, Immediate(0));
   4577 #endif
   4578 
   4579   // Clear the top frame.
   4580   ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
   4581                                        isolate());
   4582   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
   4583   movp(c_entry_fp_operand, Immediate(0));
   4584 }
   4585 
   4586 
   4587 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   4588                                             Register scratch,
   4589                                             Label* miss) {
   4590   Label same_contexts;
   4591 
   4592   DCHECK(!holder_reg.is(scratch));
   4593   DCHECK(!scratch.is(kScratchRegister));
   4594   // Load current lexical context from the active StandardFrame, which
   4595   // may require crawling past STUB frames.
   4596   Label load_context;
   4597   Label has_context;
   4598   movp(scratch, rbp);
   4599   bind(&load_context);
   4600   DCHECK(SmiValuesAre32Bits());
   4601   // This is "JumpIfNotSmi" but without loading the value into a register.
   4602   cmpl(MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
   4603        Immediate(0));
   4604   j(not_equal, &has_context);
   4605   movp(scratch, MemOperand(scratch, CommonFrameConstants::kCallerFPOffset));
   4606   jmp(&load_context);
   4607   bind(&has_context);
   4608   movp(scratch,
   4609        MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
   4610 
   4611   // When generating debug code, make sure the lexical context is set.
   4612   if (emit_debug_code()) {
   4613     cmpp(scratch, Immediate(0));
   4614     Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
   4615   }
   4616   // Load the native context of the current context.
   4617   movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
   4618 
   4619   // Check the context is a native context.
   4620   if (emit_debug_code()) {
   4621     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
   4622         isolate()->factory()->native_context_map());
   4623     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
   4624   }
   4625 
   4626   // Check if both contexts are the same.
   4627   cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   4628   j(equal, &same_contexts);
   4629 
   4630   // Compare security tokens.
   4631   // Check that the security token in the calling global object is
   4632   // compatible with the security token in the receiving global
   4633   // object.
   4634 
   4635   // Check the context is a native context.
   4636   if (emit_debug_code()) {
   4637     // Preserve original value of holder_reg.
   4638     Push(holder_reg);
   4639     movp(holder_reg,
   4640          FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   4641     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
   4642     Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
   4643 
   4644     // Read the first word and compare to native_context_map().
   4645     movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
   4646     CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
   4647     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
   4648     Pop(holder_reg);
   4649   }
   4650 
   4651   movp(kScratchRegister,
   4652        FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   4653   int token_offset =
   4654       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   4655   movp(scratch, FieldOperand(scratch, token_offset));
   4656   cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
   4657   j(not_equal, miss);
   4658 
   4659   bind(&same_contexts);
   4660 }
   4661 
   4662 
   4663 // Compute the hash code from the untagged key.  This must be kept in sync with
   4664 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
   4665 // code-stubs-hydrogen.cc
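        // For reference, the steps below correspond roughly to the following
        // 32-bit unsigned computation:
        //   hash ^= seed;
        //   hash = ~hash + (hash << 15);
        //   hash ^= hash >> 12;
        //   hash += hash << 2;
        //   hash ^= hash >> 4;
        //   hash *= 2057;
        //   hash ^= hash >> 16;
        //   hash &= 0x3fffffff;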
   4666 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   4667   // First of all we assign the hash seed to scratch.
   4668   LoadRoot(scratch, Heap::kHashSeedRootIndex);
   4669   SmiToInteger32(scratch, scratch);
   4670 
   4671   // Xor original key with a seed.
   4672   xorl(r0, scratch);
   4673 
   4674   // Compute the hash code from the untagged key.  This must be kept in sync
   4675   // with ComputeIntegerHash in utils.h.
   4676   //
   4677   // hash = ~hash + (hash << 15);
   4678   movl(scratch, r0);
   4679   notl(r0);
   4680   shll(scratch, Immediate(15));
   4681   addl(r0, scratch);
   4682   // hash = hash ^ (hash >> 12);
   4683   movl(scratch, r0);
   4684   shrl(scratch, Immediate(12));
   4685   xorl(r0, scratch);
   4686   // hash = hash + (hash << 2);
   4687   leal(r0, Operand(r0, r0, times_4, 0));
   4688   // hash = hash ^ (hash >> 4);
   4689   movl(scratch, r0);
   4690   shrl(scratch, Immediate(4));
   4691   xorl(r0, scratch);
   4692   // hash = hash * 2057;
   4693   imull(r0, r0, Immediate(2057));
   4694   // hash = hash ^ (hash >> 16);
   4695   movl(scratch, r0);
   4696   shrl(scratch, Immediate(16));
   4697   xorl(r0, scratch);
   4698   andl(r0, Immediate(0x3fffffff));
   4699 }
   4700 
   4701 
   4702 
   4703 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   4704                                               Register elements,
   4705                                               Register key,
   4706                                               Register r0,
   4707                                               Register r1,
   4708                                               Register r2,
   4709                                               Register result) {
   4710   // Register use:
   4711   //
   4712   // elements - holds the slow-case elements of the receiver on entry.
   4713   //            Unchanged unless 'result' is the same register.
   4714   //
   4715   // key      - holds the smi key on entry.
   4716   //            Unchanged unless 'result' is the same register.
   4717   //
   4718   // Scratch registers:
   4719   //
   4720   // r0 - holds the untagged key on entry and holds the hash once computed.
   4721   //
   4722   // r1 - used to hold the capacity mask of the dictionary
   4723   //
   4724   // r2 - used for the index into the dictionary.
   4725   //
   4726   // result - holds the result on exit if the load succeeded.
   4727   //          Allowed to be the same as 'elements' or 'key'.
   4728   //          Unchanged on bailout so 'elements' or 'key' can be used
   4729   //          in further computation.
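          //
          // The lookup below is an unrolled open-addressing probe: for each of
          // kNumberDictionaryProbes attempts the entry index is computed from the
          // hash plus a per-iteration probe offset, masked by the capacity, and
          // scaled by kEntrySize; the final probe jumps to 'miss' on mismatch.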
   4730 
   4731   Label done;
   4732 
   4733   GetNumberHash(r0, r1);
   4734 
   4735   // Compute capacity mask.
   4736   SmiToInteger32(r1, FieldOperand(elements,
   4737                                   SeededNumberDictionary::kCapacityOffset));
   4738   decl(r1);
   4739 
   4740   // Generate an unrolled loop that performs a few probes before giving up.
   4741   for (int i = 0; i < kNumberDictionaryProbes; i++) {
   4742     // Use r2 for index calculations and keep the hash intact in r0.
   4743     movp(r2, r0);
   4744     // Compute the masked index: (hash + i + i * i) & mask.
   4745     if (i > 0) {
   4746       addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
   4747     }
   4748     andp(r2, r1);
   4749 
   4750     // Scale the index by multiplying by the entry size.
   4751     DCHECK(SeededNumberDictionary::kEntrySize == 3);
   4752     leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
   4753 
   4754     // Check if the key matches.
   4755     cmpp(key, FieldOperand(elements,
   4756                            r2,
   4757                            times_pointer_size,
   4758                            SeededNumberDictionary::kElementsStartOffset));
   4759     if (i != (kNumberDictionaryProbes - 1)) {
   4760       j(equal, &done);
   4761     } else {
   4762       j(not_equal, miss);
   4763     }
   4764   }
   4765 
   4766   bind(&done);
   4767   // Check that the value is a field property.
   4768   const int kDetailsOffset =
   4769       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   4770   DCHECK_EQ(DATA, 0);
   4771   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
   4772        Smi::FromInt(PropertyDetails::TypeField::kMask));
   4773   j(not_zero, miss);
   4774 
   4775   // Get the value at the masked, scaled index.
   4776   const int kValueOffset =
   4777       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   4778   movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
   4779 }
   4780 
   4781 
   4782 void MacroAssembler::LoadAllocationTopHelper(Register result,
   4783                                              Register scratch,
   4784                                              AllocationFlags flags) {
   4785   ExternalReference allocation_top =
   4786       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   4787 
   4788   // Just return if allocation top is already known.
   4789   if ((flags & RESULT_CONTAINS_TOP) != 0) {
   4790     // No use of scratch if allocation top is provided.
   4791     DCHECK(!scratch.is_valid());
   4792 #ifdef DEBUG
   4793     // Assert that result actually contains top on entry.
   4794     Operand top_operand = ExternalOperand(allocation_top);
   4795     cmpp(result, top_operand);
   4796     Check(equal, kUnexpectedAllocationTop);
   4797 #endif
   4798     return;
   4799   }
   4800 
   4801   // Move address of new object to result. Use scratch register if available,
   4802   // and keep address in scratch until call to UpdateAllocationTopHelper.
   4803   if (scratch.is_valid()) {
   4804     LoadAddress(scratch, allocation_top);
   4805     movp(result, Operand(scratch, 0));
   4806   } else {
   4807     Load(result, allocation_top);
   4808   }
   4809 }
   4810 
   4811 
   4812 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
   4813                                                  Register scratch,
   4814                                                  Label* gc_required,
   4815                                                  AllocationFlags flags) {
   4816   if (kPointerSize == kDoubleSize) {
   4817     if (FLAG_debug_code) {
   4818       testl(result, Immediate(kDoubleAlignmentMask));
   4819       Check(zero, kAllocationIsNotDoubleAligned);
   4820     }
   4821   } else {
   4822     // Align the next allocation. Storing the filler map without checking top
   4823     // is safe in new-space because the limit of the heap is aligned there.
   4824     DCHECK(kPointerSize * 2 == kDoubleSize);
   4825     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
   4826     // Make sure scratch is not clobbered by this function as it might be
   4827     // used in UpdateAllocationTopHelper later.
   4828     DCHECK(!scratch.is(kScratchRegister));
   4829     Label aligned;
   4830     testl(result, Immediate(kDoubleAlignmentMask));
   4831     j(zero, &aligned, Label::kNear);
   4832     if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
   4833       ExternalReference allocation_limit =
   4834           AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   4835       cmpp(result, ExternalOperand(allocation_limit));
   4836       j(above_equal, gc_required);
   4837     }
   4838     LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
   4839     movp(Operand(result, 0), kScratchRegister);
   4840     addp(result, Immediate(kDoubleSize / 2));
   4841     bind(&aligned);
   4842   }
   4843 }
   4844 
   4845 
   4846 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
   4847                                                Register scratch,
   4848                                                AllocationFlags flags) {
   4849   if (emit_debug_code()) {
   4850     testp(result_end, Immediate(kObjectAlignmentMask));
   4851     Check(zero, kUnalignedAllocationInNewSpace);
   4852   }
   4853 
   4854   ExternalReference allocation_top =
   4855       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   4856 
   4857   // Update new top.
   4858   if (scratch.is_valid()) {
   4859     // Scratch already contains address of allocation top.
   4860     movp(Operand(scratch, 0), result_end);
   4861   } else {
   4862     Store(allocation_top, result_end);
   4863   }
   4864 }
   4865 
   4866 
   4867 void MacroAssembler::Allocate(int object_size,
   4868                               Register result,
   4869                               Register result_end,
   4870                               Register scratch,
   4871                               Label* gc_required,
   4872                               AllocationFlags flags) {
   4873   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   4874   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   4875   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   4876   if (!FLAG_inline_new) {
   4877     if (emit_debug_code()) {
   4878       // Trash the registers to simulate an allocation failure.
   4879       movl(result, Immediate(0x7091));
   4880       if (result_end.is_valid()) {
   4881         movl(result_end, Immediate(0x7191));
   4882       }
   4883       if (scratch.is_valid()) {
   4884         movl(scratch, Immediate(0x7291));
   4885       }
   4886     }
   4887     jmp(gc_required);
   4888     return;
   4889   }
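          // Bump-pointer allocation: load the current allocation top, optionally
          // double-align it, add object_size, bail out to gc_required if the limit
          // would be exceeded, write back the new top (unless this allocation is a
          // folding dominator) and tag the result as a heap object.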
   4890   DCHECK(!result.is(result_end));
   4891 
   4892   // Load address of new object into result.
   4893   LoadAllocationTopHelper(result, scratch, flags);
   4894 
   4895   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   4896     MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
   4897   }
   4898 
   4899   // Calculate new top and bail out if new space is exhausted.
   4900   ExternalReference allocation_limit =
   4901       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   4902 
   4903   Register top_reg = result_end.is_valid() ? result_end : result;
   4904 
   4905   if (!top_reg.is(result)) {
   4906     movp(top_reg, result);
   4907   }
   4908   addp(top_reg, Immediate(object_size));
   4909   Operand limit_operand = ExternalOperand(allocation_limit);
   4910   cmpp(top_reg, limit_operand);
   4911   j(above, gc_required);
   4912 
   4913   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
   4914     // The top pointer is not updated for allocation folding dominators.
   4915     UpdateAllocationTopHelper(top_reg, scratch, flags);
   4916   }
   4917 
   4918   if (top_reg.is(result)) {
   4919     subp(result, Immediate(object_size - kHeapObjectTag));
   4920   } else {
   4921     // Tag the result.
   4922     DCHECK(kHeapObjectTag == 1);
   4923     incp(result);
   4924   }
   4925 }
   4926 
   4927 
   4928 void MacroAssembler::Allocate(int header_size,
   4929                               ScaleFactor element_size,
   4930                               Register element_count,
   4931                               Register result,
   4932                               Register result_end,
   4933                               Register scratch,
   4934                               Label* gc_required,
   4935                               AllocationFlags flags) {
   4936   DCHECK((flags & SIZE_IN_WORDS) == 0);
   4937   DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
   4938   DCHECK((flags & ALLOCATION_FOLDED) == 0);
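          // result_end = header_size + element_count scaled by element_size; the
          // Allocate overload below treats result_end as the object size in bytes.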
   4939   leap(result_end, Operand(element_count, element_size, header_size));
   4940   Allocate(result_end, result, result_end, scratch, gc_required, flags);
   4941 }
   4942 
   4943 
   4944 void MacroAssembler::Allocate(Register object_size,
   4945                               Register result,
   4946                               Register result_end,
   4947                               Register scratch,
   4948                               Label* gc_required,
   4949                               AllocationFlags flags) {
   4950   DCHECK((flags & SIZE_IN_WORDS) == 0);
   4951   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   4952   if (!FLAG_inline_new) {
   4953     if (emit_debug_code()) {
   4954       // Trash the registers to simulate an allocation failure.
   4955       movl(result, Immediate(0x7091));
   4956       movl(result_end, Immediate(0x7191));
   4957       if (scratch.is_valid()) {
   4958         movl(scratch, Immediate(0x7291));
   4959       }
   4960       // object_size is left unchanged by this function.
   4961     }
   4962     jmp(gc_required);
   4963     return;
   4964   }
   4965   DCHECK(!result.is(result_end));
   4966 
   4967   // Load address of new object into result.
   4968   LoadAllocationTopHelper(result, scratch, flags);
   4969 
   4970   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   4971     MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
   4972   }
   4973 
   4974   ExternalReference allocation_limit =
   4975       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   4976   if (!object_size.is(result_end)) {
   4977     movp(result_end, object_size);
   4978   }
   4979   addp(result_end, result);
   4980   Operand limit_operand = ExternalOperand(allocation_limit);
   4981   cmpp(result_end, limit_operand);
   4982   j(above, gc_required);
   4983 
   4984   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
   4985     // The top pointer is not updated for allocation folding dominators.
   4986     UpdateAllocationTopHelper(result_end, scratch, flags);
   4987   }
   4988 
   4989   // Tag the result.
   4990   addp(result, Immediate(kHeapObjectTag));
   4991 }
   4992 
   4993 void MacroAssembler::FastAllocate(int object_size, Register result,
   4994                                   Register result_end, AllocationFlags flags) {
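          // Note: no allocation-limit check is performed here, so the caller must
          // already have ensured that the space fits (presumably via a dominating
          // Allocate that used ALLOCATION_FOLDING_DOMINATOR).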
   4995   DCHECK(!result.is(result_end));
   4996   // Load address of new object into result.
   4997   LoadAllocationTopHelper(result, no_reg, flags);
   4998 
   4999   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   5000     MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
   5001   }
   5002 
   5003   leap(result_end, Operand(result, object_size));
   5004 
   5005   UpdateAllocationTopHelper(result_end, no_reg, flags);
   5006 
   5007   addp(result, Immediate(kHeapObjectTag));
   5008 }
   5009 
   5010 void MacroAssembler::FastAllocate(Register object_size, Register result,
   5011                                   Register result_end, AllocationFlags flags) {
   5012   DCHECK(!result.is(result_end));
   5013   // Load address of new object into result.
   5014   LoadAllocationTopHelper(result, no_reg, flags);
   5015 
   5016   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   5017     MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
   5018   }
   5019 
   5020   leap(result_end, Operand(result, object_size, times_1, 0));
   5021 
   5022   UpdateAllocationTopHelper(result_end, no_reg, flags);
   5023 
   5024   addp(result, Immediate(kHeapObjectTag));
   5025 }
   5026 
   5027 void MacroAssembler::AllocateHeapNumber(Register result,
   5028                                         Register scratch,
   5029                                         Label* gc_required,
   5030                                         MutableMode mode) {
   5031   // Allocate heap number in new space.
   5032   Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
   5033            NO_ALLOCATION_FLAGS);
   5034 
   5035   Heap::RootListIndex map_index = mode == MUTABLE
   5036       ? Heap::kMutableHeapNumberMapRootIndex
   5037       : Heap::kHeapNumberMapRootIndex;
   5038 
   5039   // Set the map.
   5040   LoadRoot(kScratchRegister, map_index);
   5041   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5042 }
   5043 
   5044 
   5045 void MacroAssembler::AllocateTwoByteString(Register result,
   5046                                            Register length,
   5047                                            Register scratch1,
   5048                                            Register scratch2,
   5049                                            Register scratch3,
   5050                                            Label* gc_required) {
   5051   // Calculate the number of bytes needed for the characters in the string while
   5052   // observing object alignment.
   5053   const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
   5054                                kObjectAlignmentMask;
   5055   DCHECK(kShortSize == 2);
   5056   // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
   5057   leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
   5058                 kHeaderAlignment));
   5059   andp(scratch1, Immediate(~kObjectAlignmentMask));
   5060   if (kHeaderAlignment > 0) {
   5061     subp(scratch1, Immediate(kHeaderAlignment));
   5062   }
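          // scratch1 now holds the character area size, rounded so that adding
          // SeqTwoByteString::kHeaderSize below yields an object-aligned total.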
   5063 
   5064   // Allocate two byte string in new space.
   5065   Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
   5066            scratch3, gc_required, NO_ALLOCATION_FLAGS);
   5067 
   5068   // Set the map, length and hash field.
   5069   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
   5070   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5071   Integer32ToSmi(scratch1, length);
   5072   movp(FieldOperand(result, String::kLengthOffset), scratch1);
   5073   movp(FieldOperand(result, String::kHashFieldOffset),
   5074        Immediate(String::kEmptyHashField));
   5075 }
   5076 
   5077 
   5078 void MacroAssembler::AllocateOneByteString(Register result, Register length,
   5079                                            Register scratch1, Register scratch2,
   5080                                            Register scratch3,
   5081                                            Label* gc_required) {
   5082   // Calculate the number of bytes needed for the characters in the string while
   5083   // observing object alignment.
   5084   const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
   5085                                kObjectAlignmentMask;
   5086   movl(scratch1, length);
   5087   DCHECK(kCharSize == 1);
   5088   addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
   5089   andp(scratch1, Immediate(~kObjectAlignmentMask));
   5090   if (kHeaderAlignment > 0) {
   5091     subp(scratch1, Immediate(kHeaderAlignment));
   5092   }
   5093 
   5094   // Allocate one-byte string in new space.
   5095   Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
   5096            scratch3, gc_required, NO_ALLOCATION_FLAGS);
   5097 
   5098   // Set the map, length and hash field.
   5099   LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
   5100   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5101   Integer32ToSmi(scratch1, length);
   5102   movp(FieldOperand(result, String::kLengthOffset), scratch1);
   5103   movp(FieldOperand(result, String::kHashFieldOffset),
   5104        Immediate(String::kEmptyHashField));
   5105 }
   5106 
   5107 
   5108 void MacroAssembler::AllocateTwoByteConsString(Register result,
   5109                                         Register scratch1,
   5110                                         Register scratch2,
   5111                                         Label* gc_required) {
   5112   // Allocate two-byte cons string in new space.
   5113   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   5114            NO_ALLOCATION_FLAGS);
   5115 
   5116   // Set the map. The other fields are left uninitialized.
   5117   LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
   5118   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5119 }
   5120 
   5121 
   5122 void MacroAssembler::AllocateOneByteConsString(Register result,
   5123                                                Register scratch1,
   5124                                                Register scratch2,
   5125                                                Label* gc_required) {
   5126   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   5127            NO_ALLOCATION_FLAGS);
   5128 
   5129   // Set the map. The other fields are left uninitialized.
   5130   LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
   5131   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5132 }
   5133 
   5134 
   5135 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
   5136                                           Register scratch1,
   5137                                           Register scratch2,
   5138                                           Label* gc_required) {
   5139   // Allocate two-byte sliced string in new space.
   5140   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   5141            NO_ALLOCATION_FLAGS);
   5142 
   5143   // Set the map. The other fields are left uninitialized.
   5144   LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
   5145   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5146 }
   5147 
   5148 
   5149 void MacroAssembler::AllocateOneByteSlicedString(Register result,
   5150                                                  Register scratch1,
   5151                                                  Register scratch2,
   5152                                                  Label* gc_required) {
   5153   // Allocate one-byte sliced string in new space.
   5154   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   5155            NO_ALLOCATION_FLAGS);
   5156 
   5157   // Set the map. The other fields are left uninitialized.
   5158   LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
   5159   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   5160 }
   5161 
   5162 
   5163 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
   5164                                      Register value, Register scratch,
   5165                                      Label* gc_required) {
   5166   DCHECK(!result.is(constructor));
   5167   DCHECK(!result.is(scratch));
   5168   DCHECK(!result.is(value));
   5169 
   5170   // Allocate JSValue in new space.
   5171   Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
   5172            NO_ALLOCATION_FLAGS);
   5173 
   5174   // Initialize the JSValue.
   5175   LoadGlobalFunctionInitialMap(constructor, scratch);
   5176   movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
   5177   LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
   5178   movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
   5179   movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
   5180   movp(FieldOperand(result, JSValue::kValueOffset), value);
   5181   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
   5182 }
   5183 
   5184 
   5185 // Copy memory, byte-by-byte, from source to destination.  Not optimized for
   5186 // long or aligned copies.  The contents of scratch and length are destroyed.
   5187 // Destination is incremented by length; source, length and scratch are
   5188 // clobbered.
   5189 // A simpler loop is faster on small copies, but slower on large ones.
   5190 // The cld() instruction must have been emitted, to clear the direction flag,
   5191 // before calling this function.
   5192 void MacroAssembler::CopyBytes(Register destination,
   5193                                Register source,
   5194                                Register length,
   5195                                int min_length,
   5196                                Register scratch) {
   5197   DCHECK(min_length >= 0);
   5198   if (emit_debug_code()) {
   5199     cmpl(length, Immediate(min_length));
   5200     Assert(greater_equal, kInvalidMinLength);
   5201   }
   5202   Label short_loop, len8, len16, len24, done, short_string;
   5203 
   5204   const int kLongStringLimit = 4 * kPointerSize;
   5205   if (min_length <= kLongStringLimit) {
   5206     cmpl(length, Immediate(kPointerSize));
   5207     j(below, &short_string, Label::kNear);
   5208   }
   5209 
   5210   DCHECK(source.is(rsi));
   5211   DCHECK(destination.is(rdi));
   5212   DCHECK(length.is(rcx));
   5213 
   5214   if (min_length <= kLongStringLimit) {
   5215     cmpl(length, Immediate(2 * kPointerSize));
   5216     j(below_equal, &len8, Label::kNear);
   5217     cmpl(length, Immediate(3 * kPointerSize));
   5218     j(below_equal, &len16, Label::kNear);
   5219     cmpl(length, Immediate(4 * kPointerSize));
   5220     j(below_equal, &len24, Label::kNear);
   5221   }
   5222 
   5223   // Because source is 8-byte aligned in our uses of this function,
   5224   // we keep source aligned for the rep movs operation by copying the odd bytes
   5225   // at the end of the ranges.
   5226   movp(scratch, length);
   5227   shrl(length, Immediate(kPointerSizeLog2));
   5228   repmovsp();
   5229   // Copy the remaining (length % kPointerSize) bytes by storing the last
          // word of the range; overlapping bytes already copied by rep movs is fine.
   5230   andl(scratch, Immediate(kPointerSize - 1));
   5231   movp(length, Operand(source, scratch, times_1, -kPointerSize));
   5232   movp(Operand(destination, scratch, times_1, -kPointerSize), length);
   5233   addp(destination, scratch);
   5234 
   5235   if (min_length <= kLongStringLimit) {
   5236     jmp(&done, Label::kNear);
   5237     bind(&len24);
   5238     movp(scratch, Operand(source, 2 * kPointerSize));
   5239     movp(Operand(destination, 2 * kPointerSize), scratch);
   5240     bind(&len16);
   5241     movp(scratch, Operand(source, kPointerSize));
   5242     movp(Operand(destination, kPointerSize), scratch);
   5243     bind(&len8);
   5244     movp(scratch, Operand(source, 0));
   5245     movp(Operand(destination, 0), scratch);
   5246     // Copy the last word of the range to cover the remaining bytes; it may
            // overlap words copied above.
   5247     movp(scratch, Operand(source, length, times_1, -kPointerSize));
   5248     movp(Operand(destination, length, times_1, -kPointerSize), scratch);
   5249     addp(destination, length);
   5250     jmp(&done, Label::kNear);
   5251 
   5252     bind(&short_string);
   5253     if (min_length == 0) {
   5254       testl(length, length);
   5255       j(zero, &done, Label::kNear);
   5256     }
   5257 
   5258     bind(&short_loop);
   5259     movb(scratch, Operand(source, 0));
   5260     movb(Operand(destination, 0), scratch);
   5261     incp(source);
   5262     incp(destination);
   5263     decl(length);
   5264     j(not_zero, &short_loop, Label::kNear);
   5265   }
   5266 
   5267   bind(&done);
   5268 }
   5269 
   5270 
   5271 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
   5272                                                 Register end_address,
   5273                                                 Register filler) {
   5274   Label loop, entry;
   5275   jmp(&entry, Label::kNear);
   5276   bind(&loop);
   5277   movp(Operand(current_address, 0), filler);
   5278   addp(current_address, Immediate(kPointerSize));
   5279   bind(&entry);
   5280   cmpp(current_address, end_address);
   5281   j(below, &loop, Label::kNear);
   5282 }
   5283 
   5284 
   5285 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   5286   if (context_chain_length > 0) {
   5287     // Move up the chain of contexts to the context containing the slot.
   5288     movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   5289     for (int i = 1; i < context_chain_length; i++) {
   5290       movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   5291     }
   5292   } else {
   5293     // Slot is in the current function context.  Move it into the
   5294     // destination register in case we store into it (the write barrier
   5295     // cannot be allowed to destroy the context in rsi).
   5296     movp(dst, rsi);
   5297   }
   5298 
   5299   // We should not have found a with context by walking the context
   5300   // chain (i.e., the static scope chain and runtime context chain do
   5301   // not agree).  A variable occurring in such a scope should have
   5302   // slot type LOOKUP and not CONTEXT.
   5303   if (emit_debug_code()) {
   5304     CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
   5305                 Heap::kWithContextMapRootIndex);
   5306     Check(not_equal, kVariableResolvedToWithContext);
   5307   }
   5308 }
   5309 
   5310 
   5311 void MacroAssembler::LoadTransitionedArrayMapConditional(
   5312     ElementsKind expected_kind,
   5313     ElementsKind transitioned_kind,
   5314     Register map_in_out,
   5315     Register scratch,
   5316     Label* no_map_match) {
   5317   DCHECK(IsFastElementsKind(expected_kind));
   5318   DCHECK(IsFastElementsKind(transitioned_kind));
   5319 
   5320   // Check that the function's map is the same as the expected cached map.
   5321   movp(scratch, NativeContextOperand());
   5322   cmpp(map_in_out,
   5323        ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
   5324   j(not_equal, no_map_match);
   5325 
   5326   // Use the transitioned cached map.
   5327   movp(map_in_out,
   5328        ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
   5329 }
   5330 
   5331 
   5332 #ifdef _WIN64
   5333 static const int kRegisterPassedArguments = 4;
   5334 #else
   5335 static const int kRegisterPassedArguments = 6;
   5336 #endif
   5337 
   5338 
   5339 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   5340   movp(dst, NativeContextOperand());
   5341   movp(dst, ContextOperand(dst, index));
   5342 }
   5343 
   5344 
   5345 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   5346                                                   Register map) {
   5347   // Load the initial map.  The global functions all have initial maps.
   5348   movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   5349   if (emit_debug_code()) {
   5350     Label ok, fail;
   5351     CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
   5352     jmp(&ok);
   5353     bind(&fail);
   5354     Abort(kGlobalFunctionsMustHaveInitialMap);
   5355     bind(&ok);
   5356   }
   5357 }
   5358 
   5359 
   5360 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
   5361   // On Windows 64 stack slots are reserved by the caller for all arguments
   5362   // including the ones passed in registers, and space is always allocated for
   5363   // the four register arguments even if the function takes fewer than four
   5364   // arguments.
   5365   // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
   5366   // and the caller does not reserve stack slots for them.
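          // For example, num_arguments == 6 requires 6 stack slots on Windows (home
          // space for the four register arguments plus two stack arguments) and 0
          // on Linux/Mac.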
   5367   DCHECK(num_arguments >= 0);
   5368 #ifdef _WIN64
   5369   const int kMinimumStackSlots = kRegisterPassedArguments;
   5370   if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
   5371   return num_arguments;
   5372 #else
   5373   if (num_arguments < kRegisterPassedArguments) return 0;
   5374   return num_arguments - kRegisterPassedArguments;
   5375 #endif
   5376 }
   5377 
   5378 
   5379 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
   5380                                                Register index,
   5381                                                Register value,
   5382                                                uint32_t encoding_mask) {
   5383   Label is_object;
   5384   JumpIfNotSmi(string, &is_object);
   5385   Abort(kNonObject);
   5386   bind(&is_object);
   5387 
   5388   Push(value);
   5389   movp(value, FieldOperand(string, HeapObject::kMapOffset));
   5390   movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
   5391 
   5392   andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
   5393   cmpp(value, Immediate(encoding_mask));
   5394   Pop(value);
   5395   Check(equal, kUnexpectedStringType);
   5396 
   5397   // The index is assumed to be untagged coming in; tag it to compare with the
   5398   // string length without using a temp register.  It is restored at the end of
   5399   // this function.
   5400   Integer32ToSmi(index, index);
   5401   SmiCompare(index, FieldOperand(string, String::kLengthOffset));
   5402   Check(less, kIndexIsTooLarge);
   5403 
   5404   SmiCompare(index, Smi::FromInt(0));
   5405   Check(greater_equal, kIndexIsNegative);
   5406 
   5407   // Restore the index
   5408   SmiToInteger32(index, index);
   5409 }
   5410 
   5411 
   5412 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
   5413   int frame_alignment = base::OS::ActivationFrameAlignment();
   5414   DCHECK(frame_alignment != 0);
   5415   DCHECK(num_arguments >= 0);
   5416 
   5417   // Align the stack and allocate space for the arguments and the old rsp.
   5418   movp(kScratchRegister, rsp);
   5419   DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   5420   int argument_slots_on_stack =
   5421       ArgumentStackSlotsForCFunctionCall(num_arguments);
   5422   subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
   5423   andp(rsp, Immediate(-frame_alignment));
   5424   movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
   5425 }
   5426 
   5427 
   5428 void MacroAssembler::CallCFunction(ExternalReference function,
   5429                                    int num_arguments) {
   5430   LoadAddress(rax, function);
   5431   CallCFunction(rax, num_arguments);
   5432 }
   5433 
   5434 
   5435 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   5436   DCHECK(has_frame());
   5437   // Check stack alignment.
   5438   if (emit_debug_code()) {
   5439     CheckStackAlignment();
   5440   }
   5441 
   5442   call(function);
   5443   DCHECK(base::OS::ActivationFrameAlignment() != 0);
   5444   DCHECK(num_arguments >= 0);
   5445   int argument_slots_on_stack =
   5446       ArgumentStackSlotsForCFunctionCall(num_arguments);
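          // Restore the stack pointer that PrepareCallCFunction saved just above
          // the argument slots.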
   5447   movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
   5448 }
   5449 
   5450 
   5451 #ifdef DEBUG
   5452 bool AreAliased(Register reg1,
   5453                 Register reg2,
   5454                 Register reg3,
   5455                 Register reg4,
   5456                 Register reg5,
   5457                 Register reg6,
   5458                 Register reg7,
   5459                 Register reg8) {
   5460   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
   5461       reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
   5462       reg7.is_valid() + reg8.is_valid();
   5463 
   5464   RegList regs = 0;
   5465   if (reg1.is_valid()) regs |= reg1.bit();
   5466   if (reg2.is_valid()) regs |= reg2.bit();
   5467   if (reg3.is_valid()) regs |= reg3.bit();
   5468   if (reg4.is_valid()) regs |= reg4.bit();
   5469   if (reg5.is_valid()) regs |= reg5.bit();
   5470   if (reg6.is_valid()) regs |= reg6.bit();
   5471   if (reg7.is_valid()) regs |= reg7.bit();
   5472   if (reg8.is_valid()) regs |= reg8.bit();
   5473   int n_of_non_aliasing_regs = NumRegs(regs);
   5474 
   5475   return n_of_valid_regs != n_of_non_aliasing_regs;
   5476 }
   5477 #endif
   5478 
   5479 
   5480 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
   5481     : address_(address),
   5482       size_(size),
   5483       masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
   5484   // Create a new macro assembler pointing to the address of the code to patch.
   5485   // The size is adjusted with kGap in order for the assembler to generate size
   5486   // bytes of instructions without failing with buffer size constraints.
   5487   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   5488 }
   5489 
   5490 
   5491 CodePatcher::~CodePatcher() {
   5492   // Indicate that code has changed.
   5493   Assembler::FlushICache(masm_.isolate(), address_, size_);
   5494 
   5495   // Check that the code was patched as expected.
   5496   DCHECK(masm_.pc_ == address_ + size_);
   5497   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   5498 }
   5499 
   5500 
   5501 void MacroAssembler::CheckPageFlag(
   5502     Register object,
   5503     Register scratch,
   5504     int mask,
   5505     Condition cc,
   5506     Label* condition_met,
   5507     Label::Distance condition_met_distance) {
   5508   DCHECK(cc == zero || cc == not_zero);
   5509   if (scratch.is(object)) {
   5510     andp(scratch, Immediate(~Page::kPageAlignmentMask));
   5511   } else {
   5512     movp(scratch, Immediate(~Page::kPageAlignmentMask));
   5513     andp(scratch, object);
   5514   }
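          // Use a byte-sized test when the mask fits in one byte; this keeps the
          // encoded instruction short.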
   5515   if (mask < (1 << kBitsPerByte)) {
   5516     testb(Operand(scratch, MemoryChunk::kFlagsOffset),
   5517           Immediate(static_cast<uint8_t>(mask)));
   5518   } else {
   5519     testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   5520   }
   5521   j(cc, condition_met, condition_met_distance);
   5522 }
   5523 
   5524 
   5525 void MacroAssembler::JumpIfBlack(Register object,
   5526                                  Register bitmap_scratch,
   5527                                  Register mask_scratch,
   5528                                  Label* on_black,
   5529                                  Label::Distance on_black_distance) {
   5530   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
   5531 
   5532   GetMarkBits(object, bitmap_scratch, mask_scratch);
   5533 
   5534   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
   5535   // The mask_scratch register contains a 1 at the position of the first bit
   5536   // and a 1 at the position of the second bit. All other positions are zero.
   5537   movp(rcx, mask_scratch);
   5538   andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
   5539   cmpp(mask_scratch, rcx);
   5540   j(equal, on_black, on_black_distance);
   5541 }
   5542 
   5543 
   5544 void MacroAssembler::GetMarkBits(Register addr_reg,
   5545                                  Register bitmap_reg,
   5546                                  Register mask_reg) {
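          // On return, bitmap_reg holds the page address plus the byte offset of
          // the marking bitmap cell covering addr_reg (callers add
          // MemoryChunk::kHeaderSize to reach the cell), and mask_reg holds a
          // two-bit mask (0b11) shifted to the object's bit position in that cell.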
   5547   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
   5548   movp(bitmap_reg, addr_reg);
   5549   // Sign-extended 32-bit immediate.
   5550   andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
   5551   movp(rcx, addr_reg);
   5552   int shift =
   5553       Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
   5554   shrl(rcx, Immediate(shift));
   5555   andp(rcx,
   5556        Immediate((Page::kPageAlignmentMask >> shift) &
   5557                  ~(Bitmap::kBytesPerCell - 1)));
   5558 
   5559   addp(bitmap_reg, rcx);
   5560   movp(rcx, addr_reg);
   5561   shrl(rcx, Immediate(kPointerSizeLog2));
   5562   andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
   5563   movl(mask_reg, Immediate(3));
   5564   shlp_cl(mask_reg);
   5565 }
   5566 
   5567 
   5568 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
   5569                                  Register mask_scratch, Label* value_is_white,
   5570                                  Label::Distance distance) {
   5571   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
   5572   GetMarkBits(value, bitmap_scratch, mask_scratch);
   5573 
   5574   // If the value is black or grey we don't need to do anything.
   5575   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   5576   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
   5577   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
   5578   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
   5579 
   5580   // Since both black and grey have a 1 in the first position and white does
   5581   // not have a 1 there we only need to check one bit.
   5582   testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
   5583   j(zero, value_is_white, distance);
   5584 }
   5585 
   5586 
   5587 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
   5588   Label next, start;
   5589   Register empty_fixed_array_value = r8;
   5590   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   5591   movp(rcx, rax);
   5592 
   5593   // Check if the enum length field is properly initialized, indicating that
   5594   // there is an enum cache.
   5595   movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
   5596 
   5597   EnumLength(rdx, rbx);
   5598   Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
   5599   j(equal, call_runtime);
   5600 
   5601   jmp(&start);
   5602 
   5603   bind(&next);
   5604 
   5605   movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
   5606 
   5607   // For all objects but the receiver, check that the cache is empty.
   5608   EnumLength(rdx, rbx);
   5609   Cmp(rdx, Smi::FromInt(0));
   5610   j(not_equal, call_runtime);
   5611 
   5612   bind(&start);
   5613 
   5614   // Check that there are no elements. Register rcx contains the current JS
   5615   // object we've reached through the prototype chain.
   5616   Label no_elements;
   5617   cmpp(empty_fixed_array_value,
   5618        FieldOperand(rcx, JSObject::kElementsOffset));
   5619   j(equal, &no_elements);
   5620 
   5621   // Second chance, the object may be using the empty slow element dictionary.
   5622   LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
   5623   cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
   5624   j(not_equal, call_runtime);
   5625 
   5626   bind(&no_elements);
   5627   movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
   5628   CompareRoot(rcx, Heap::kNullValueRootIndex);
   5629   j(not_equal, &next);
   5630 }
   5631 
   5632 
   5633 void MacroAssembler::TestJSArrayForAllocationMemento(
   5634     Register receiver_reg,
   5635     Register scratch_reg,
   5636     Label* no_memento_found) {
   5637   Label map_check;
   5638   Label top_check;
   5639   ExternalReference new_space_allocation_top =
   5640       ExternalReference::new_space_allocation_top_address(isolate());
   5641   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   5642   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
   5643 
   5644   // Bail out if the object is not in new space.
   5645   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   5646   // If the object is in new space, we need to check whether it is on the same
   5647   // page as the current top.
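          // XOR-ing the memento end address with the allocation top leaves the
          // page-aligned bits zero exactly when both lie on the same page.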
   5648   leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
   5649   xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
   5650   testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   5651   j(zero, &top_check);
   5652   // The object is on a different page than allocation top. Bail out if the
   5653   // object sits on the page boundary as no memento can follow and we cannot
   5654   // touch the memory following it.
   5655   leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
   5656   xorp(scratch_reg, receiver_reg);
   5657   testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
   5658   j(not_zero, no_memento_found);
   5659   // Continue with the actual map check.
   5660   jmp(&map_check);
   5661   // If top is on the same page as the current object, we need to check whether
   5662   // we are below top.
   5663   bind(&top_check);
   5664   leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
   5665   cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
   5666   j(greater, no_memento_found);
   5667   // Memento map check.
   5668   bind(&map_check);
   5669   CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
   5670               Heap::kAllocationMementoMapRootIndex);
   5671 }
   5672 
   5673 
   5674 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
   5675     Register object,
   5676     Register scratch0,
   5677     Register scratch1,
   5678     Label* found) {
   5679   DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
   5680   DCHECK(!scratch1.is(scratch0));
   5681   Register current = scratch0;
   5682   Label loop_again, end;
   5683 
   5684   movp(current, object);
   5685   movp(current, FieldOperand(current, HeapObject::kMapOffset));
   5686   movp(current, FieldOperand(current, Map::kPrototypeOffset));
   5687   CompareRoot(current, Heap::kNullValueRootIndex);
   5688   j(equal, &end);
   5689 
   5690   // Loop based on the map going up the prototype chain.
   5691   bind(&loop_again);
   5692   movp(current, FieldOperand(current, HeapObject::kMapOffset));
   5693   STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
   5694   STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
   5695   CmpInstanceType(current, JS_OBJECT_TYPE);
   5696   j(below, found);
   5697   movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
   5698   DecodeField<Map::ElementsKindBits>(scratch1);
   5699   cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
   5700   j(equal, found);
   5701   movp(current, FieldOperand(current, Map::kPrototypeOffset));
   5702   CompareRoot(current, Heap::kNullValueRootIndex);
   5703   j(not_equal, &loop_again);
   5704 
   5705   bind(&end);
   5706 }
   5707 
   5708 
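        // Computes the truncated quotient of 'dividend' divided by the constant
        // 'divisor' without using idiv, via the multiply-by-magic-number technique.
        // The quotient is left in rdx; rax is clobbered.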
   5709 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
   5710   DCHECK(!dividend.is(rax));
   5711   DCHECK(!dividend.is(rdx));
   5712   base::MagicNumbersForDivision<uint32_t> mag =
   5713       base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
   5714   movl(rax, Immediate(mag.multiplier));
   5715   imull(dividend);
   5716   bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
   5717   if (divisor > 0 && neg) addl(rdx, dividend);
   5718   if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
   5719   if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
   5720   movl(rax, dividend);
   5721   shrl(rax, Immediate(31));
   5722   addl(rdx, rax);
   5723 }
   5724 
   5725 
   5726 }  // namespace internal
   5727 }  // namespace v8
   5728 
   5729 #endif  // V8_TARGET_ARCH_X64
   5730