// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/x87/codegen-x87.h"

#if V8_TARGET_ARCH_X87

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // Load double input into registers.
  __ fld_d(MemOperand(esp, 4));
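  // 0x027F selects 53-bit (double) precision in the x87 control word so the
  // square root is rounded as an IEEE double; 0x037F afterwards restores the
  // default 64-bit extended precision with all exceptions masked.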
  __ X87SetFPUCW(0x027F);
  __ fsqrt();
  __ X87SetFPUCW(0x037F);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.

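// Maps a bound Label's position to an absolute address inside the freshly
// allocated code buffer.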
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};


MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // 32-bit C declaration function calls pass arguments on stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  // No SSE2.
  Label forward;
  __ cmp(count, 0);
  __ j(equal, &pop_and_return);
  __ cmp(dst, src);
  __ j(above, &backward);
  __ jmp(&forward);
  {
    // Simple forward copier.
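    // Copy four bytes at a time while at least four bytes remain, then
    // finish the tail one byte at a time.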
    Label forward_loop_1byte, forward_loop_4byte;
    __ bind(&forward_loop_4byte);
    __ mov(eax, Operand(src, 0));
    __ sub(count, Immediate(4));
    __ add(src, Immediate(4));
    __ mov(Operand(dst, 0), eax);
    __ add(dst, Immediate(4));
    __ bind(&forward);  // Entry point.
    __ cmp(count, 3);
    __ j(above, &forward_loop_4byte);
    __ bind(&forward_loop_1byte);
    __ cmp(count, 0);
    __ j(below_equal, &pop_and_return);
    __ mov_b(eax, Operand(src, 0));
    __ dec(count);
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ jmp(&forward_loop_1byte);
  }
  {
    // Simple backward copier.
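    // dst lies above src, so copy from the end towards the start to keep
    // memmove semantics for overlapping regions.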
    Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
    __ bind(&backward);
    __ add(src, count);
    __ add(dst, count);
    __ cmp(count, 3);
    __ j(below_equal, &entry_shortcut);

    __ bind(&backward_loop_4byte);
    __ sub(src, Immediate(4));
    __ sub(count, Immediate(4));
    __ mov(eax, Operand(src, 0));
    __ sub(dst, Immediate(4));
    __ mov(Operand(dst, 0), eax);
    __ cmp(count, 3);
    __ j(above, &backward_loop_4byte);
    __ bind(&backward_loop_1byte);
    __ cmp(count, 0);
    __ j(below_equal, &pop_and_return);
    __ bind(&entry_shortcut);
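    // count is at least 1 on entry here (the zero case already returned
    // above), so the check at the loop head can be skipped.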
    __ dec(src);
    __ dec(count);
    __ mov_b(eax, Operand(src, 0));
    __ dec(dst);
    __ mov_b(Operand(dst, 0), eax);
    __ jmp(&backward_loop_1byte);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != nullptr);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);
  __ push(esi);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
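  // Each double element occupies 8 bytes and needs 8-byte alignment, hence
  // DOUBLE_ALIGNMENT; REGISTER_VALUE_IS_SMI tells Allocate that edi holds a
  // smi-tagged element count.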
  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);

  // Restore registers before jumping into runtime.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
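  // fild_s only takes a memory operand, so spill the untagged value to the
  // stack, load it onto the x87 stack, and store it back out as a 64-bit
  // double into the FixedDoubleArray.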
  __ SmiUntag(ebx);
  __ push(ebx);
  __ fild_s(Operand(esp, 0));
  __ pop(ebx);
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));

  __ bind(&entry);
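  // Decrement the smi-tagged index and keep looping while it is still
  // non-negative.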
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  // Restore registers.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(esi);
  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());
  __ bind(&initialization_loop_entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &initialization_loop);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ pop(esi);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
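  // The hole is encoded as a NaN with the distinguished upper word
  // kHoleNanUpper32, so comparing only the upper 32 bits of the element is
  // enough to detect it.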
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ pop(esi);

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, Immediate(kShortExternalStringMask));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, Immediate(kStringEncodingMask));
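  // mov does not modify the flags, so the resource data can be loaded before
  // branching on the encoding bit set by test_b above.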
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


#undef __

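// The "young" sequence emitted here is the standard frame-setup prologue.
// PatchPlatformCodeAge below overwrites it with a call to a code age stub
// once the code has aged.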
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
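    // The remaining bytes hold the call's rel32 displacement; adding it to
    // the address of the following instruction yields the entry point of the
    // code age stub.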
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87