// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/s390/codegen-s390.h"

#if V8_TARGET_ARCH_S390

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"

namespace v8 {
namespace internal {

#define __ masm.

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(d0);
  __ sqdbr(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
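// Usage sketch (assuming the UnaryMathFunctionWithIsolate typedef from
// src/codegen.h, double (*)(double, Isolate*)); the nullptr check matters
// because the simulator build above returns no native stub:
//
//   UnaryMathFunctionWithIsolate fast_sqrt = CreateSqrtFunction(isolate);
//   if (fast_sqrt != nullptr) {
//     double root = fast_sqrt(2.0, isolate);  // ~1.4142135623730951
//   }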

#undef __

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
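// Illustrative pairing of the two hooks (a sketch; Runtime::kFoo is a
// placeholder, not a real runtime function): a stub brackets its runtime
// call so that an INTERNAL frame exists for the call's duration.
//
//   StubRuntimeCallHelper helper;
//   helper.BeforeCall(masm);           // EnterFrame + set_has_frame(true)
//   masm->CallRuntime(Runtime::kFoo);  // the actual runtime call
//   helper.AfterCall(masm);            // LeaveFrame + set_has_frame(false)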

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r6;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
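// JS-level sketch of what this fast path covers: a transition that swaps the
// map but keeps the backing store layout, e.g. smi-only elements to generic
// object elements.
//
//   var a = [1, 2, 3];  // smi-only elements
//   a[0] = {};          // same FixedArray backing store, only the map changes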

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r6;
  Register length = r7;
  Register array = r8;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r1;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map, Label::kNear);

  // Preserve lr and use r14 as a temporary register.
  __ push(r14);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(r14, length);
  __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ SubP(array, array, Operand(kHeapObjectTag));
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ AddP(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ AddP(target_map, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array, length);
  __ AddP(array_end, r9, array);
// Repurpose registers no longer in use and load the hole NaN pattern, which
// the conversion loop below stores for hole elements.
#if V8_TARGET_ARCH_S390X
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_int64: kHoleNanInt64 (64-bit), or
  // hole_lower: kHoleNanLower32 and hole_upper: kHoleNanUpper32 (31-bit)
  // array_end: end of destination FixedDoubleArray, not tagged
  // r9: begin of destination FixedDoubleArray element fields, not tagged

  __ b(&entry, Label::kNear);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done, Label::kNear);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(r14);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(r14, MemOperand(scratch1));
  __ la(scratch1, MemOperand(scratch1, kPointerSize));
  // r14: current element
  __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(r14, d0);
  __ StoreDouble(d0, MemOperand(r9, 0));
  __ la(r9, MemOperand(r9, 8));

  __ b(&entry, Label::kNear);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Reload the element just consumed; a non-smi in a smi-only array must
    // be the hole.
    __ LoadP(r1, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_S390X
  __ stg(hole_int64, MemOperand(r9, 0));
#else
  __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
  __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
#endif
  __ AddP(r9, Operand(8));

  __ bind(&entry);
  __ CmpP(r9, array_end);
  __ blt(&loop);

  __ pop(r14);
  __ bind(&done);
}
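// JS-level sketch of what triggers GenerateSmiToDouble: storing a non-smi
// number into a smi-only array, which rewrites the FixedArray of tagged smis
// into a FixedDoubleArray of unboxed doubles (holes become the hole NaN).
//
//   var a = [1, 2, 3];  // smi-only elements
//   a[0] = 1.5;         // backing store converted by the code above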

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r6;
  Register array = r8;
  Register length = r7;
  Register scratch = r1;
  Register scratch3 = r9;
  Register hole_value = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ AddP(array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
            r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ AddP(src_elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
  __ beq(&loop_done, Label::kNear);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ AddP(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
  __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
  __ BranchOnCount(scratch, &initialization_loop);

  __ AddP(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Use offset addresses in src_elements to take full advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop, Label::kNear);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ AddP(dst_elements, Operand(kPointerSize));
  __ CmpLogicalP(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ AddP(src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
  __ beq(&convert_hole, Label::kNear);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_S390X
  __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for stg
  __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ LoadlW(scratch2,
            MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ LoadlW(upper_bits,
            MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ StoreW(scratch2,
            FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ StoreW(upper_bits,
            FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ LoadRR(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ AddP(dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ CmpLogicalP(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
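// JS-level sketch of the inverse transition: storing a non-number into a
// double array, which boxes each raw double into a freshly allocated
// HeapNumber (the hole NaN turns back into the-hole pointer).
//
//   var a = [1.5, 2.5];  // unboxed double elements
//   a[0] = "x";          // backing store converted by the code above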

// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ mov(r0, Operand(kIsIndirectStringMask));
  __ AndP(r0, result);
  __ beq(&check_sequential, Label::kNear /*, cr0*/);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ LoadRR(r0, result);
  __ AndP(r0, ip /*, SetRC*/);  // Should be okay to remove RC
  __ beq(&cons_string, Label::kNear /*, cr0*/);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ AddP(index, ip);
  __ b(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ mov(r0, Operand(kStringRepresentationMask));
  __ AndP(r0, result);
  __ bne(&external_string, Label::kNear);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding, Label::kNear);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ mov(r0, Operand(kIsIndirectStringMask));
    __ AndP(r0, result);
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ mov(r0, Operand(kShortExternalStringMask));
  __ AndP(r0, result);
  __ bne(call_runtime /*, cr0*/);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ mov(r0, Operand(kStringEncodingMask));
  __ AndP(r0, result);
  __ bne(&one_byte, Label::kNear);
  // Two-byte string.
  __ ShiftLeftP(result, index, Operand(1));
  __ LoadLogicalHalfWordP(result, MemOperand(string, result));
  __ b(&done, Label::kNear);
  __ bind(&one_byte);
  // One-byte string.
  __ LoadlB(result, MemOperand(string, index));
  __ bind(&done);
}
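// Rough control flow of the sequence above (a summary, not emitted code):
//   1. Load the string's instance type.
//   2. Indirect strings: a slice adds its offset to index and continues with
//      its parent; a cons is handled only if its second part is the empty
//      string (i.e. it is already flat), otherwise bail out to the runtime.
//      Then reload the instance type of the underlying string.
//   3. Sequential strings: read the byte or half-word at index.
//      External (non-short) strings: read from the resource data.
//      Short external strings: bail out to the runtime.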

#undef __

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator ICache is set up.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length(), CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(r3);
}

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = NULL;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}
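// Note on the decoding above: an old (patched) sequence embeds a call to an
// age-specific stub, so reading the call target at
// sequence + kCodeAgingTargetDelta and querying that stub recovers the age
// and parity of the patched code.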

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // We need to push lr on the stack so that GenerateMakeCodeYoungAgainCommon
    // knows where to pick up the return address.
    //
    // Since we can no longer guarantee that ip will hold the branch address
    // because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
    // can calculate the branch address offset.
    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
    patcher.masm()->CleanseP(r14);
    patcher.masm()->Push(r14);
    patcher.masm()->mov(r2, Operand(target));
    patcher.masm()->Call(r2);
    for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
         i += 2) {
      // TODO(joransiu): Create nop function to pad
      //       (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
      patcher.masm()->nop();  // one 2-byte nop per iteration
    }
  }
}
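// Sketch of the two sequences this code distinguishes (illustrative layout,
// not exact byte counts):
//
//   young (emitted by CodeAgingHelper):
//     PushStandardFrame(r3)              // the normal prologue
//
//   old (emitted by PatchPlatformCodeAge):
//     nop                                // marker checked by IsOld()
//     CleanseP(r14); Push(r14)           // save the return address
//     mov r2, <age stub>; Call(r2)       // enter the code-age stub
//     2-byte nops to pad out kNoCodeAgeSequenceLength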

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390