Home | History | Annotate | Download | only in ppc
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/codegen.h"
      6 #include "src/deoptimizer.h"
      7 #include "src/full-codegen/full-codegen.h"
      8 #include "src/register-configuration.h"
      9 #include "src/safepoint-table.h"
     10 
     11 namespace v8 {
     12 namespace internal {
     13 
// Byte size of one entry in the deoptimization entry table. Each entry is the
// li(ip, i) + b(&done) pair emitted by TableEntryGenerator::GeneratePrologue(),
// which DCHECKs that every entry occupies exactly this many bytes so entries
// can be addressed by index.
const int Deoptimizer::table_entry_size_ = 8;
     15 
     16 
     17 int Deoptimizer::patch_size() {
     18 #if V8_TARGET_ARCH_PPC64
     19   const int kCallInstructionSizeInWords = 7;
     20 #else
     21   const int kCallInstructionSizeInWords = 4;
     22 #endif
     23   return kCallInstructionSizeInWords * Assembler::kInstrSize;
     24 }
     25 
     26 
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Empty because there is no need for relocation information for the code
  // patching in Deoptimizer::PatchCodeForDeoptimization below: that routine
  // invalidates the relocation info before writing the calls.
}
     31 
     32 
// Patches an optimized code object so that every lazy-bailout site calls the
// corresponding lazy deoptimization entry. After this, any return into the
// code object lands in the deoptimizer instead of the optimized code.
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();

  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    // Patch a breakpoint at the first instruction past the code-age sequence
    // (or at the start if there is none), and at the OSR entry if present.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(isolate, pointer, 1);
    patcher.masm()->bkpt(0);

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
                              1);
      osr_patcher.masm()->bkpt(0);
    }
  }

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    // Pc == -1 marks an entry with no associated lazy-bailout site.
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    // We need calls to have a predictable size in the unoptimized code, but
    // this is optimized code, so we don't have to have a predictable size.
    int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
        deopt_entry, kRelocInfo_NONEPTR);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
    DCHECK(call_size_in_bytes <= patch_size());
    CodePatcher patcher(isolate, call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
    // Each patched call must fit in its reserved patch area and must not
    // overlap the previous one (sites are expected in ascending pc order).
    DCHECK(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}
     89 
     90 
// Populates the deoptimizer's input FrameDescription from the physical
// JavaScript frame: register slots get placeholder Smis (except sp/fp, which
// are set to the frame's real values), double registers are zeroed, and the
// frame slots are copied verbatim from the stack starting at tos.
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.
  // We ensure the values are Smis to avoid confusing the garbage
  // collector in the event that any values are retrieved and stored
  // elsewhere.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
  }
  // sp and fp must be real so the deoptimizer can locate the frame.
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(
        i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i)));
  }
}
    114 
    115 
    116 void Deoptimizer::SetPlatformCompiledStubRegisters(
    117     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
    118   ApiFunction function(descriptor->deoptimization_handler());
    119   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
    120   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
    121   int params = descriptor->GetHandlerParameterCount();
    122   output_frame->SetRegister(r3.code(), params);
    123   output_frame->SetRegister(r4.code(), handler);
    124 }
    125 
    126 
    127 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
    128   for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
    129     double double_value = input_->GetDoubleRegister(i);
    130     output_frame->SetDoubleRegister(i, double_value);
    131   }
    132 }
    133 
    134 
// Reports whether the input frame contains dynamic alignment padding that the
// deoptimizer would need to account for.
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on PPC in the input frame.
  return false;
}
    139 
    140 
    141 #define __ masm()->
    142 
    143 // This code tries to be close to ia32 code so that any changes can be
    144 // easily ported.
// Emits the shared deoptimization stub: saves machine state, builds a
// Deoptimizer object via a C call, copies the live frame into the input
// FrameDescription, asks C++ to compute the output frames, materializes them
// on the stack, restores registers, and jumps to the continuation.
void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();

  // Unlike on ARM we don't save all the registers, just the useful ones.
  // For the rest, there are gaps on the stack, so the offsets remain the same.
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit();

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;

  // Save all double registers before messing with them.
  __ subi(sp, sp, Operand(kDoubleRegsSize));
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    // Slots are indexed by register code, so non-allocatable codes leave gaps.
    int offset = code * kDoubleSize;
    __ stfd(dreg, MemOperand(sp, offset));
  }

  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps for other registers.
  __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize));
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }

  // Record fp as the C entry frame pointer for the isolate.
  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ StoreP(fp, MemOperand(ip));

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack (pushed by GeneratePrologue, now just
  // above the saved-registers area).
  __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object (r6) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r7.
  __ mflr(r6);
  // Correct one word for bailout id.
  __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  __ sub(r7, fp, r7);

  // Allocate a new deoptimizer object.
  // Pass six arguments in r3 to r8.
  __ PrepareCallCFunction(6, r8);
  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ li(r4, Operand(type()));  // bailout type,
  // r5: bailout id already loaded.
  // r6: code address or 0 already loaded.
  // r7: Fp-to-sp delta.
  __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register r3 and get the input
  // frame descriptor pointer to r4 (deoptimizer->input_);
  __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ LoadP(r5, MemOperand(sp, i * kPointerSize));
    __ StoreP(r5, MemOperand(r4, offset));
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy double registers to
  // double_registers_[DoubleRegister::kNumRegisters]
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    // Source slots sit above the core-register save area on the stack.
    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ lfd(d0, MemOperand(sp, src_offset));
    __ stfd(d0, MemOperand(r4, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  // Compute a pointer to the unwinding limit in register r5; that is
  // the first stack slot not part of the input frame.
  __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
  __ add(r5, r5, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(r7);
  __ StoreP(r7, MemOperand(r6, 0));
  __ addi(r6, r6, Operand(kPointerSize));
  __ bind(&pop_loop_header);
  __ cmp(r5, sp);
  __ bne(&pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r3);  // Preserve deoptimizer object across call.
  // r3: deoptimizer object; r4: scratch.
  __ PrepareCallCFunction(1, r4);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(r3);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: r7 = current "FrameDescription** output_",
  // r4 = one past the last FrameDescription**.
  __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
  __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset()));  // r7 is output_.
  __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2));
  __ add(r4, r7, r4);
  __ b(&outer_loop_header);

  __ bind(&outer_push_loop);
  // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
  __ LoadP(r5, MemOperand(r7, 0));  // output_[ix]
  __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
  __ b(&inner_loop_header);

  // Push the frame's contents top-down (r6 counts down to zero).
  __ bind(&inner_push_loop);
  __ addi(r6, r6, Operand(-sizeof(intptr_t)));
  __ add(r9, r5, r6);
  __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
  __ push(r9);

  __ bind(&inner_loop_header);
  __ cmpi(r6, Operand::Zero());
  __ bne(&inner_push_loop);  // test for gt?

  __ addi(r7, r7, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(r7, r4);
  __ blt(&outer_push_loop);

  // Restore double registers from the input frame description.
  __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ lfd(dreg, MemOperand(r4, src_offset));
  }

  // Push state, pc, and continuation from the last output frame.
  // (r5 still points at the last FrameDescription from the loop above.)
  __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset()));
  __ push(r9);
  __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
  __ push(r9);
  __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
  __ push(r9);

  // Restore the registers from the last output frame.
  // ip is used as the base pointer, so it must not itself be restored.
  DCHECK(!(ip.bit() & restored_regs));
  __ mr(ip, r5);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ LoadP(ToRegister(i), MemOperand(ip, offset));
    }
  }

  __ InitializeRootRegister();

  __ pop(ip);  // get continuation, leave pc on stack
  __ pop(r0);
  __ mtlr(r0);
  __ Jump(ip);
  __ stop("Unreachable.");
}
    332 
    333 
// Emits the deoptimization entry table: count() fixed-size entries, each of
// which loads its own index into ip and branches to a shared tail that pushes
// the index (the "bailout id" consumed later by Generate()).
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ li(ip, Operand(i));
    __ b(&done);
    // Entries must be exactly table_entry_size_ bytes so callers can compute
    // an entry's address from its index.
    DCHECK(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
  __ push(ip);
}
    350 
    351 
// Writes the caller's return address into the frame slot at the given offset.
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
    355 
    356 
// Writes the caller's frame pointer into the frame slot at the given offset.
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
    360 
    361 
// Writes the caller's constant pool pointer into the frame slot at the given
// offset; only meaningful when embedded constant pools are enabled.
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  DCHECK(FLAG_enable_embedded_constant_pool);
  SetFrameSlot(offset, value);
}
    366 
    367 
    368 #undef __
    369 }  // namespace internal
    370 }  // namespace v8
    371