// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"

namespace v8 {
namespace internal {

const int Deoptimizer::table_entry_size_ = 10;


int Deoptimizer::patch_size() {
  return Assembler::kCallInstructionLength;
}


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  Isolate* isolate = code->GetIsolate();
  HandleScope scope(isolate);

  // Compute the size of relocation information needed for the code
  // patching in Deoptimizer::PatchCodeForDeoptimization below.
  int min_reloc_size = 0;
  int prev_pc_offset = 0;
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    int pc_offset = deopt_data->Pc(i)->value();
    if (pc_offset == -1) continue;
    pc_offset = pc_offset + 1;  // We will encode the pc offset after the call.
    DCHECK_GE(pc_offset, prev_pc_offset);
    int pc_delta = pc_offset - prev_pc_offset;
    // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
    // if encodable with small pc delta encoding and up to 6 bytes
    // otherwise.
    if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
      min_reloc_size += 2;
    } else {
      min_reloc_size += 6;
    }
    prev_pc_offset = pc_offset;
  }

  // If the relocation information is not big enough we create a new
  // relocation info object that is padded with comments to make it
  // big enough for lazy deoptimization.
  int reloc_length = code->relocation_info()->length();
  if (min_reloc_size > reloc_length) {
    int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
    // Padding needed.
    int min_padding = min_reloc_size - reloc_length;
    // Number of comments needed to take up at least that much space.
    int additional_comments =
        (min_padding + comment_reloc_size - 1) / comment_reloc_size;
    // Actual padding size.
    int padding = additional_comments * comment_reloc_size;
    // Allocate new relocation info and copy old relocation to the end
    // of the new relocation info array because relocation info is
    // written and read backwards.
    Factory* factory = isolate->factory();
    Handle<ByteArray> new_reloc =
        factory->NewByteArray(reloc_length + padding, TENURED);
    MemCopy(new_reloc->GetDataStartAddress() + padding,
            code->relocation_info()->GetDataStartAddress(), reloc_length);
    // Create a relocation writer to write the comments in the padding
    // space. Use position 0 for everything to ensure short encoding.
    RelocInfoWriter reloc_info_writer(
        new_reloc->GetDataStartAddress() + padding, 0);
    intptr_t comment_string
        = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
    RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
    for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
      byte* pos_before = reloc_info_writer.pos();
#endif
      reloc_info_writer.Write(&rinfo);
      DCHECK(RelocInfo::kMinRelocCommentSize ==
             pos_before - reloc_info_writer.pos());
    }
    // Replace relocation information on the code object.
    code->set_relocation_info(*new_reloc);
  }
}


void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(isolate, pointer, 1);
    patcher.masm()->int3();

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
                              1);
      osr_patcher.masm()->int3();
    }
  }

  // We will overwrite the code's relocation info in-place. Relocation info
  // is written backward. The relocation info is the payload of a byte
  // array. Later on we will slide this to the start of the byte array and
  // create a filler object in the remaining space.
  ByteArray* reloc_info = code->relocation_info();
  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

  // Since the call is a relative encoding, write new
  // reloc info. We do not need any of the existing reloc info because the
  // existing code will not be used again (we zap it in debug builds).
  //
  // Emit call to lazy deoptimization at all lazy deopt points.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    // Patch lazy deoptimization entry.
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    CodePatcher patcher(isolate, call_address, patch_size());
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
    // We use RUNTIME_ENTRY for deoptimization bailouts.
    RelocInfo rinfo(isolate, call_address + 1,  // 1 after the call opcode.
                    RelocInfo::RUNTIME_ENTRY,
                    reinterpret_cast<intptr_t>(deopt_entry), NULL);
    reloc_info_writer.Write(&rinfo);
    DCHECK_GE(reloc_info_writer.pos(),
              reloc_info->address() + ByteArray::kHeaderSize);
    DCHECK(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  // Move the relocation info to the beginning of the byte array.
  const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
  MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);

  // Right trim the relocation info to free up remaining space.
  const int delta = reloc_info->length() - new_reloc_length;
  if (delta > 0) {
    isolate->heap()->RightTrimFixedArray(reloc_info, delta);
  }
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
  intptr_t handler =
      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(eax.code(), params);
  output_frame->SetRegister(ebx.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
    Float64 double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}

#define __ masm()->

void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
  __ sub(esp, Immediate(kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    XMMRegister xmm_reg = XMMRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ movsd(Operand(esp, offset), xmm_reg);
  }

  __ pushad();

  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
  __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);

  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
                                      kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object
  // and compute the fp-to-sp delta in register edx.
  __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
  __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));

  __ sub(edx, ebp);
  __ neg(edx);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, eax);
  __ mov(eax, Immediate(0));
  Label context_check;
  __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(edi, &context_check);
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
  __ mov(Operand(esp, 5 * kPointerSize),
         Immediate(ExternalReference::isolate_address(isolate())));
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve deoptimizer object in register eax and get the input
  // frame descriptor pointer.
  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));

  // Fill in the input registers.
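  // Note: pushad above pushed the general purpose registers in ascending
  // code order (eax first), so popping in descending order places each
  // register's value into its slot in the FrameDescription.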
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ pop(Operand(ebx, offset));
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Fill in the double input registers.
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset = code * kDoubleSize;
    __ movsd(xmm0, Operand(esp, src_offset));
    __ movsd(Operand(ebx, dst_offset), xmm0);
  }

  // Clear all FPU exceptions.
  // TODO(ulan): Find out why the TOP register is not zero here in some cases,
  // and check that the generated code never deoptimizes with unbalanced stack.
  __ fnclex();

  // Remove the bailout id, return address and the double registers.
  __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));

  // Compute a pointer to the unwinding limit in register ecx; that is
  // the first stack slot not part of the input frame.
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ add(ecx, esp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
  Label pop_loop_header;
  __ jmp(&pop_loop_header);
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(Operand(edx, 0));
  __ add(edx, Immediate(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(ecx, esp);
  __ j(not_equal, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(eax);
  __ PrepareCallCFunction(1, ebx);
  __ mov(Operand(esp, 0 * kPointerSize), eax);
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(eax);

  __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: eax = current FrameDescription**, edx = one past the
  // last FrameDescription**.
  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
  __ lea(edx, Operand(eax, edx, times_4, 0));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
  __ mov(ebx, Operand(eax, 0));
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(ecx, Immediate(sizeof(uint32_t)));
  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
  __ bind(&inner_loop_header);
  __ test(ecx, ecx);
  __ j(not_zero, &inner_push_loop);
  __ add(eax, Immediate(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(eax, edx);
  __ j(below, &outer_push_loop);

  // In case of a failed STUB, we have to restore the XMM registers.
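  // ebx still points at the last output FrameDescription from the loop above.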
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    XMMRegister xmm_reg = XMMRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ movsd(xmm_reg, Operand(ebx, src_offset));
  }

  // Push state, pc, and continuation from the last output frame.
  __ push(Operand(ebx, FrameDescription::state_offset()));
  __ push(Operand(ebx, FrameDescription::pc_offset()));
  __ push(Operand(ebx, FrameDescription::continuation_offset()));


  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ push(Operand(ebx, offset));
  }

  // Restore the registers from the stack.
  __ popad();

  // Return to the continuation point.
  __ ret(0);
}


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ push_imm32(i);
    __ jmp(&done);
    DCHECK(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}


#undef __


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32