// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/x64/codegen-x64.h"

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.

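// Generates a free-standing stub of type UnaryMathFunctionWithIsolate that
// takes a double in xmm0 and returns its square root in xmm0 via a single
// SQRTSD instruction. The code is emitted into a freshly allocated
// executable buffer, flushed from the instruction cache, and write-protected
// before being returned as a callable function pointer.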
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // xmm0: raw double input; the square root is returned in xmm0 as well.
  __ Sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

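// Performs an elements-kind transition that only requires changing the
// receiver's map (the backing store layout is already compatible): the new
// map is stored and a write barrier is emitted for the map slot. If
// allocation-memento tracking is on, arrays with a memento bail out first.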
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // Return address is on the stack.
  Register scratch = rdi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != nullptr);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


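// Transitions a JSArray's backing store from a FixedArray of smis to a
// FixedDoubleArray. The existing store is reused in place when it is
// non-COW, in new space, and smis and doubles have the same size; otherwise
// a new store is allocated. Each smi is converted to a double and each hole
// to the hole NaN bit pattern.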
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  // The fail label is taken when the transition cannot be completed inline,
  // e.g. if allocating a new backing store fails.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  if (kPointerSize == kDoubleSize) {
    // Check backing store for COW-ness. For COW arrays we have to
    // allocate a new backing store.
    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &new_backing_store);
  } else {
    // For the x32 port we have to allocate a new backing store, because the
    // SMI size does not equal the double size.
    DCHECK(kDoubleSize == 2 * kPointerSize);
    __ jmp(&new_backing_store);
  }

  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movp(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs.  The array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movp(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

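  // The conversion loop below walks the elements from the highest index down
  // to 0: control jumps to `entry` first, which decrements r9, and
  // j(not_sign) keeps iterating as long as the index stays non-negative.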
  // Conversion loop.
  __ bind(&loop);
  __ movp(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(kScratchDoubleReg, rbx);
  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           kScratchDoubleReg);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}


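// Transitions a JSArray's backing store from a FixedDoubleArray to a
// FixedArray of tagged values. A new FixedArray is allocated and
// pessimistically pre-filled with holes, then each double is boxed into a
// freshly allocated HeapNumber (hole NaNs become the-hole sentinel). If any
// allocation fails, the generated code bails out to the fail label.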
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

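  // rsi (the context register) is about to be clobbered with the hole NaN
  // pattern and rax is clobbered by AllocateHeapNumber in the loop, so both
  // are preserved across the conversion.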
  __ Push(rsi);
  __ Push(rax);

  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
          rdi);
  __ bind(&initialization_loop_entry);
  __ decp(r9);
  __ j(not_sign, &initialization_loop);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(rax);
  __ Pop(rsi);
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movp(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with the newly created and filled
  // FixedArray.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(rax);
  __ Pop(rsi);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


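// Loads the character at |index| of |string| into |result|. Indirect
// strings (slices and flat cons strings) are first reduced to their
// underlying sequential or external string; non-flat cons strings and short
// external strings are punted to |call_runtime|. Both one-byte and two-byte
// encodings are handled.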
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into the result register.
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addp(index, result);
  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right-hand side is the empty string (i.e. whether
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
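  // Note that the movp below does not affect the CPU flags, so the result of
  // the testb above is still live for the branch that follows the load.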
  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}

#undef __


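// Code aging support. Young code starts with the frame-building prologue
// emitted below; PatchPlatformCodeAge replaces that sequence with a short
// call into an age-specific stub, so a leading call opcode is what marks a
// sequence as old.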
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->pushq(rbp);
  patcher.masm()->movp(rbp, rsp);
  patcher.masm()->Push(rsi);
  patcher.masm()->Push(rdi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}


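// Returns the operand of the argument at |index|, where argument 0 is the
// one furthest from the stack pointer. For example, with base_reg_ == rsp,
// an immediate argument count of 2 and ARGUMENTS_CONTAIN_RECEIVER,
// GetArgumentOperand(0) yields Operand(rsp, kPCOnStackSize +
// 2 * kPointerSize), i.e. rsp + 24 on x64.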
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK(index >= 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64