// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ppc/codegen-ppc.h"

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"

namespace v8 {
namespace internal {


#define __ masm.

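// Generates a free-standing native routine that computes sqrt(x) with the
// PPC fsqrt instruction. Under the simulator no native buffer can be
// executed, so nullptr is returned and callers are expected to fall back to
// the C library implementation.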
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  // Called from C.
  __ function_descriptor();

  __ MovFromFloatParameter(d1);
  __ fsqrt(d1, d1);
  __ MovToFloatResult(d1);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
         !RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

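// These helpers bracket calls from stubs into the runtime: an INTERNAL frame
// is set up around the call, and the has_frame flag is kept in sync so that
// frame-related assertions in the MacroAssembler can check it.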
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

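// A map-change elements transition (e.g. FAST_SMI_ELEMENTS -> FAST_ELEMENTS)
// keeps the backing store layout intact, so only the receiver's map needs to
// be swapped and recorded with the write barrier. When allocation-site
// tracking is on, arrays carrying an allocation memento bail out first.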
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != nullptr);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


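// Transitions a JSArray backing store from a FixedArray of smis to a
// FixedDoubleArray: a new double array is allocated, every smi is converted
// to a double, holes are rewritten as the hole NaN bit pattern, and finally
// the receiver's map and elements pointer are updated. Empty arrays skip the
// copy and only change the map.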
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address.
  Label loop, entry, convert_hole, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r10;
  Register scratch3 = r11;
  Register scratch4 = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(scratch3, length);
  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
  __ subi(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  // elements: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array_end, length);
  __ add(array_end, scratch2, array_end);
  // Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: beginning of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 (hole_int64 holds the full pattern on PPC64)
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: beginning of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(scratch3, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // scratch3: current element
  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(scratch3, d0);
  __ stfd(d0, MemOperand(scratch2, 0));
  __ addi(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(scratch2, 0));
#else
  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
  __ addi(scratch2, scratch2, Operand(8));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ blt(&loop);

  __ bind(&done);
}


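// Transitions a JSArray backing store from a FixedDoubleArray to a FixedArray
// of tagged values: a new FixedArray is allocated and pre-filled with the
// hole (allocating HeapNumbers below can trigger GC, so a partly initialized
// array must never be visible), then every double is boxed into a fresh
// HeapNumber while hole NaNs become the-hole pointers. Empty arrays only
// change the map.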
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r10;
  Register scratch3 = r11;
  Register hole_value = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, FieldMemOperand(array, FixedArray::kLengthOffset), r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: beginning of destination FixedArray element fields,
  //               not tagged
  // src_elements: beginning of source FixedDoubleArray element fields,
  //               not tagged
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of the next element
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // Subtract the heap object tag so std can use an untagged address.
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


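// Loads the character at |index| out of |string| into |result|, dispatching
// on the string representation: slices and flat cons strings are unwrapped to
// their underlying string first, then sequential and external strings are
// read directly (two-byte or one-byte). Cons strings that are not flat and
// short external strings are punted to |call_runtime|.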
// Assumes ip can be used as a scratch register below.
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ andi(r0, result, Operand(kIsIndirectStringMask));
  __ beq(&check_sequential, cr0);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ and_(r0, result, ip, SetRC);
  __ beq(&cons_string, cr0);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ add(index, index, ip);
  __ b(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ andi(r0, result, Operand(kStringRepresentationMask));
  __ bne(&external_string, cr0);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ addi(string, string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ andi(r0, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ andi(r0, result, Operand(kShortExternalStringMask));
  __ bne(call_runtime, cr0);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ andi(r0, result, Operand(kStringEncodingMask));
  __ bne(&one_byte, cr0);
  // Two-byte string.
  __ ShiftLeftImm(result, index, Operand(1));
  __ lhzx(result, MemOperand(string, result));
  __ b(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ lbzx(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

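// Code aging works by patching the prologue of generated code. The "young"
// sequence assembled here is the standard frame-building prologue padded with
// nops; PatchPlatformCodeAge below overwrites it with a jump to the
// appropriate code-age stub once the code is considered old.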
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator's ICache is set up.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(r4);
  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
    patcher->masm()->nop();
  }
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = nullptr;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


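// Rewrites the prologue of |sequence|: making code young again simply copies
// the original young sequence back, while aging it patches in a marker nop
// followed by an absolute jump to the code-age stub (ip and lr must stay
// intact, hence the manual mov/Jump instead of Call).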
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // Don't use Call -- we need to preserve ip and lr.
    // See GenerateMakeCodeYoungAgainCommon for the stub code.
    patcher.masm()->nop();  // Marker to detect the patched sequence (see IsOld).
    patcher.masm()->mov(r3, Operand(target));
    patcher.masm()->Jump(r3);
    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
      patcher.masm()->nop();
    }
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC