1 // Copyright 2012 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #if V8_TARGET_ARCH_X87 6 7 #include "src/crankshaft/x87/lithium-codegen-x87.h" 8 9 #include "src/base/bits.h" 10 #include "src/builtins/builtins-constructor.h" 11 #include "src/code-factory.h" 12 #include "src/code-stubs.h" 13 #include "src/codegen.h" 14 #include "src/crankshaft/hydrogen-osr.h" 15 #include "src/deoptimizer.h" 16 #include "src/ic/ic.h" 17 #include "src/ic/stub-cache.h" 18 #include "src/x87/frames-x87.h" 19 20 namespace v8 { 21 namespace internal { 22 23 // When invoking builtins, we need to record the safepoint in the middle of 24 // the invoke instruction sequence generated by the macro assembler. 25 class SafepointGenerator final : public CallWrapper { 26 public: 27 SafepointGenerator(LCodeGen* codegen, 28 LPointerMap* pointers, 29 Safepoint::DeoptMode mode) 30 : codegen_(codegen), 31 pointers_(pointers), 32 deopt_mode_(mode) {} 33 virtual ~SafepointGenerator() {} 34 35 void BeforeCall(int call_size) const override {} 36 37 void AfterCall() const override { 38 codegen_->RecordSafepoint(pointers_, deopt_mode_); 39 } 40 41 private: 42 LCodeGen* codegen_; 43 LPointerMap* pointers_; 44 Safepoint::DeoptMode deopt_mode_; 45 }; 46 47 48 #define __ masm()-> 49 50 bool LCodeGen::GenerateCode() { 51 LPhase phase("Z_Code generation", chunk()); 52 DCHECK(is_unused()); 53 status_ = GENERATING; 54 55 // Open a frame scope to indicate that there is a frame on the stack. The 56 // MANUAL indicates that the scope shouldn't actually generate code to set up 57 // the frame (that is done in GeneratePrologue). 58 FrameScope frame_scope(masm_, StackFrame::MANUAL); 59 60 return GeneratePrologue() && 61 GenerateBody() && 62 GenerateDeferredCode() && 63 GenerateJumpTable() && 64 GenerateSafepointTable(); 65 } 66 67 68 void LCodeGen::FinishCode(Handle<Code> code) { 69 DCHECK(is_done()); 70 code->set_stack_slots(GetTotalFrameSlotCount()); 71 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 72 PopulateDeoptimizationData(code); 73 if (info()->ShouldEnsureSpaceForLazyDeopt()) { 74 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); 75 } 76 } 77 78 79 #ifdef _MSC_VER 80 void LCodeGen::MakeSureStackPagesMapped(int offset) { 81 const int kPageSize = 4 * KB; 82 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { 83 __ mov(Operand(esp, offset), eax); 84 } 85 } 86 #endif 87 88 89 bool LCodeGen::GeneratePrologue() { 90 DCHECK(is_generating()); 91 92 if (info()->IsOptimizing()) { 93 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 94 } 95 96 info()->set_prologue_offset(masm_->pc_offset()); 97 if (NeedsEagerFrame()) { 98 DCHECK(!frame_is_built_); 99 frame_is_built_ = true; 100 if (info()->IsStub()) { 101 __ StubPrologue(StackFrame::STUB); 102 } else { 103 __ Prologue(info()->GeneratePreagedPrologue()); 104 } 105 } 106 107 // Reserve space for the stack slots needed by the code. 
108 int slots = GetStackSlotCount(); 109 DCHECK(slots != 0 || !info()->IsOptimizing()); 110 if (slots > 0) { 111 __ sub(Operand(esp), Immediate(slots * kPointerSize)); 112 #ifdef _MSC_VER 113 MakeSureStackPagesMapped(slots * kPointerSize); 114 #endif 115 if (FLAG_debug_code) { 116 __ push(eax); 117 __ mov(Operand(eax), Immediate(slots)); 118 Label loop; 119 __ bind(&loop); 120 __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue)); 121 __ dec(eax); 122 __ j(not_zero, &loop); 123 __ pop(eax); 124 } 125 } 126 127 // Initailize FPU state. 128 __ fninit(); 129 130 return !is_aborted(); 131 } 132 133 134 void LCodeGen::DoPrologue(LPrologue* instr) { 135 Comment(";;; Prologue begin"); 136 137 // Possibly allocate a local context. 138 if (info_->scope()->NeedsContext()) { 139 Comment(";;; Allocate local context"); 140 bool need_write_barrier = true; 141 // Argument to NewContext is the function, which is still in edi. 142 int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 143 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; 144 if (info()->scope()->is_script_scope()) { 145 __ push(edi); 146 __ Push(info()->scope()->scope_info()); 147 __ CallRuntime(Runtime::kNewScriptContext); 148 deopt_mode = Safepoint::kLazyDeopt; 149 } else { 150 if (slots <= 151 ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) { 152 Callable callable = CodeFactory::FastNewFunctionContext( 153 isolate(), info()->scope()->scope_type()); 154 __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), 155 Immediate(slots)); 156 __ Call(callable.code(), RelocInfo::CODE_TARGET); 157 // Result of the FastNewFunctionContext builtin is always in new space. 158 need_write_barrier = false; 159 } else { 160 __ Push(edi); 161 __ Push(Smi::FromInt(info()->scope()->scope_type())); 162 __ CallRuntime(Runtime::kNewFunctionContext); 163 } 164 } 165 RecordSafepoint(deopt_mode); 166 167 // Context is returned in eax. It replaces the context passed to us. 168 // It's saved in the stack and kept live in esi. 169 __ mov(esi, eax); 170 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax); 171 172 // Copy parameters into context if necessary. 173 int num_parameters = info()->scope()->num_parameters(); 174 int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0; 175 for (int i = first_parameter; i < num_parameters; i++) { 176 Variable* var = (i == -1) ? info()->scope()->receiver() 177 : info()->scope()->parameter(i); 178 if (var->IsContextSlot()) { 179 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 180 (num_parameters - 1 - i) * kPointerSize; 181 // Load parameter from stack. 182 __ mov(eax, Operand(ebp, parameter_offset)); 183 // Store it in the context. 184 int context_offset = Context::SlotOffset(var->index()); 185 __ mov(Operand(esi, context_offset), eax); 186 // Update the write barrier. This clobbers eax and ebx. 187 if (need_write_barrier) { 188 __ RecordWriteContextSlot(esi, context_offset, eax, ebx, 189 kDontSaveFPRegs); 190 } else if (FLAG_debug_code) { 191 Label done; 192 __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); 193 __ Abort(kExpectedNewSpaceObject); 194 __ bind(&done); 195 } 196 } 197 } 198 Comment(";;; End allocate local context"); 199 } 200 201 Comment(";;; Prologue end"); 202 } 203 204 205 void LCodeGen::GenerateOsrPrologue() { 206 // Generate the OSR entry prologue at the first unknown OSR value, or if there 207 // are none, at the OSR entrypoint instruction. 
208 if (osr_pc_offset_ >= 0) return; 209 210 osr_pc_offset_ = masm()->pc_offset(); 211 212 // Interpreter is the first tier compiler now. It will run the code generated 213 // by TurboFan compiler which will always put "1" on x87 FPU stack. 214 // This behavior will affect crankshaft's x87 FPU stack depth check under 215 // debug mode. 216 // Need to reset the FPU stack here for this scenario. 217 __ fninit(); 218 219 // Adjust the frame size, subsuming the unoptimized frame into the 220 // optimized frame. 221 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 222 DCHECK(slots >= 0); 223 __ sub(esp, Immediate(slots * kPointerSize)); 224 } 225 226 227 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { 228 if (instr->IsCall()) { 229 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 230 } 231 if (!instr->IsLazyBailout() && !instr->IsGap()) { 232 safepoints_.BumpLastLazySafepointIndex(); 233 } 234 FlushX87StackIfNecessary(instr); 235 } 236 237 238 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { 239 // When return from function call, FPU should be initialized again. 240 if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) { 241 bool double_result = instr->HasDoubleRegisterResult(); 242 if (double_result) { 243 __ lea(esp, Operand(esp, -kDoubleSize)); 244 __ fstp_d(Operand(esp, 0)); 245 } 246 __ fninit(); 247 if (double_result) { 248 __ fld_d(Operand(esp, 0)); 249 __ lea(esp, Operand(esp, kDoubleSize)); 250 } 251 } 252 if (instr->IsGoto()) { 253 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this); 254 } else if (FLAG_debug_code && FLAG_enable_slow_asserts && 255 !instr->IsGap() && !instr->IsReturn()) { 256 if (instr->ClobbersDoubleRegisters(isolate())) { 257 if (instr->HasDoubleRegisterResult()) { 258 DCHECK_EQ(1, x87_stack_.depth()); 259 } else { 260 DCHECK_EQ(0, x87_stack_.depth()); 261 } 262 } 263 __ VerifyX87StackDepth(x87_stack_.depth()); 264 } 265 } 266 267 268 bool LCodeGen::GenerateJumpTable() { 269 if (!jump_table_.length()) return !is_aborted(); 270 271 Label needs_frame; 272 Comment(";;; -------------------- Jump table --------------------"); 273 274 for (int i = 0; i < jump_table_.length(); i++) { 275 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; 276 __ bind(&table_entry->label); 277 Address entry = table_entry->address; 278 DeoptComment(table_entry->deopt_info); 279 if (table_entry->needs_frame) { 280 DCHECK(!info()->saves_caller_doubles()); 281 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); 282 __ call(&needs_frame); 283 } else { 284 __ call(entry, RelocInfo::RUNTIME_ENTRY); 285 } 286 } 287 if (needs_frame.is_linked()) { 288 __ bind(&needs_frame); 289 /* stack layout 290 3: entry address 291 2: return address <-- esp 292 1: garbage 293 0: garbage 294 */ 295 __ push(MemOperand(esp, 0)); // Copy return address. 296 __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address. 297 298 /* stack layout 299 4: entry address 300 3: return address 301 1: return address 302 0: entry address <-- esp 303 */ 304 __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp. 305 // Fill ebp with the right stack frame address. 306 __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); 307 308 // This variant of deopt can only be used with stubs. Since we don't 309 // have a function pointer to install in the stack frame that we're 310 // building, install a special marker there instead. 
311 DCHECK(info()->IsStub()); 312 __ mov(MemOperand(esp, 2 * kPointerSize), 313 Immediate(Smi::FromInt(StackFrame::STUB))); 314 315 /* stack layout 316 3: old ebp 317 2: stub marker 318 1: return address 319 0: entry address <-- esp 320 */ 321 __ ret(0); // Call the continuation without clobbering registers. 322 } 323 return !is_aborted(); 324 } 325 326 327 bool LCodeGen::GenerateDeferredCode() { 328 DCHECK(is_generating()); 329 if (deferred_.length() > 0) { 330 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 331 LDeferredCode* code = deferred_[i]; 332 X87Stack copy(code->x87_stack()); 333 x87_stack_ = copy; 334 335 HValue* value = 336 instructions_->at(code->instruction_index())->hydrogen_value(); 337 RecordAndWritePosition(value->position()); 338 339 Comment(";;; <@%d,#%d> " 340 "-------------------- Deferred %s --------------------", 341 code->instruction_index(), 342 code->instr()->hydrogen_value()->id(), 343 code->instr()->Mnemonic()); 344 __ bind(code->entry()); 345 if (NeedsDeferredFrame()) { 346 Comment(";;; Build frame"); 347 DCHECK(!frame_is_built_); 348 DCHECK(info()->IsStub()); 349 frame_is_built_ = true; 350 // Build the frame in such a way that esi isn't trashed. 351 __ push(ebp); // Caller's frame pointer. 352 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); 353 __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp)); 354 Comment(";;; Deferred code"); 355 } 356 code->Generate(); 357 if (NeedsDeferredFrame()) { 358 __ bind(code->done()); 359 Comment(";;; Destroy frame"); 360 DCHECK(frame_is_built_); 361 frame_is_built_ = false; 362 __ mov(esp, ebp); 363 __ pop(ebp); 364 } 365 __ jmp(code->exit()); 366 } 367 } 368 369 // Deferred code is the last part of the instruction sequence. Mark 370 // the generated code as done unless we bailed out. 371 if (!is_aborted()) status_ = DONE; 372 return !is_aborted(); 373 } 374 375 376 bool LCodeGen::GenerateSafepointTable() { 377 DCHECK(is_done()); 378 if (info()->ShouldEnsureSpaceForLazyDeopt()) { 379 // For lazy deoptimization we need space to patch a call after every call. 380 // Ensure there is always space for such patching, even if the code ends 381 // in a call. 
382 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); 383 while (masm()->pc_offset() < target_offset) { 384 masm()->nop(); 385 } 386 } 387 safepoints_.Emit(masm(), GetTotalFrameSlotCount()); 388 return !is_aborted(); 389 } 390 391 392 Register LCodeGen::ToRegister(int code) const { 393 return Register::from_code(code); 394 } 395 396 397 X87Register LCodeGen::ToX87Register(int code) const { 398 return X87Register::from_code(code); 399 } 400 401 402 void LCodeGen::X87LoadForUsage(X87Register reg) { 403 DCHECK(x87_stack_.Contains(reg)); 404 x87_stack_.Fxch(reg); 405 x87_stack_.pop(); 406 } 407 408 409 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { 410 DCHECK(x87_stack_.Contains(reg1)); 411 DCHECK(x87_stack_.Contains(reg2)); 412 if (reg1.is(reg2) && x87_stack_.depth() == 1) { 413 __ fld(x87_stack_.st(reg1)); 414 x87_stack_.push(reg1); 415 x87_stack_.pop(); 416 x87_stack_.pop(); 417 } else { 418 x87_stack_.Fxch(reg1, 1); 419 x87_stack_.Fxch(reg2); 420 x87_stack_.pop(); 421 x87_stack_.pop(); 422 } 423 } 424 425 426 int LCodeGen::X87Stack::GetLayout() { 427 int layout = stack_depth_; 428 for (int i = 0; i < stack_depth_; i++) { 429 layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3)); 430 } 431 432 return layout; 433 } 434 435 436 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { 437 DCHECK(is_mutable_); 438 DCHECK(Contains(reg) && stack_depth_ > other_slot); 439 int i = ArrayIndex(reg); 440 int st = st2idx(i); 441 if (st != other_slot) { 442 int other_i = st2idx(other_slot); 443 X87Register other = stack_[other_i]; 444 stack_[other_i] = reg; 445 stack_[i] = other; 446 if (st == 0) { 447 __ fxch(other_slot); 448 } else if (other_slot == 0) { 449 __ fxch(st); 450 } else { 451 __ fxch(st); 452 __ fxch(other_slot); 453 __ fxch(st); 454 } 455 } 456 } 457 458 459 int LCodeGen::X87Stack::st2idx(int pos) { 460 return stack_depth_ - pos - 1; 461 } 462 463 464 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { 465 for (int i = 0; i < stack_depth_; i++) { 466 if (stack_[i].is(reg)) return i; 467 } 468 UNREACHABLE(); 469 return -1; 470 } 471 472 473 bool LCodeGen::X87Stack::Contains(X87Register reg) { 474 for (int i = 0; i < stack_depth_; i++) { 475 if (stack_[i].is(reg)) return true; 476 } 477 return false; 478 } 479 480 481 void LCodeGen::X87Stack::Free(X87Register reg) { 482 DCHECK(is_mutable_); 483 DCHECK(Contains(reg)); 484 int i = ArrayIndex(reg); 485 int st = st2idx(i); 486 if (st > 0) { 487 // keep track of how fstp(i) changes the order of elements 488 int tos_i = st2idx(0); 489 stack_[i] = stack_[tos_i]; 490 } 491 pop(); 492 __ fstp(st); 493 } 494 495 496 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { 497 if (x87_stack_.Contains(dst)) { 498 x87_stack_.Fxch(dst); 499 __ fstp(0); 500 } else { 501 x87_stack_.push(dst); 502 } 503 X87Fld(src, opts); 504 } 505 506 507 void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) { 508 if (x87_stack_.Contains(dst)) { 509 x87_stack_.Fxch(dst); 510 __ fstp(0); 511 x87_stack_.pop(); 512 // Push ST(i) onto the FPU register stack 513 __ fld(x87_stack_.st(src)); 514 x87_stack_.push(dst); 515 } else { 516 // Push ST(i) onto the FPU register stack 517 __ fld(x87_stack_.st(src)); 518 x87_stack_.push(dst); 519 } 520 } 521 522 523 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { 524 DCHECK(!src.is_reg_only()); 525 switch (opts) { 526 case kX87DoubleOperand: 527 __ fld_d(src); 528 break; 529 case kX87FloatOperand: 530 __ fld_s(src); 531 break; 
532 case kX87IntOperand: 533 __ fild_s(src); 534 break; 535 default: 536 UNREACHABLE(); 537 } 538 } 539 540 541 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { 542 DCHECK(!dst.is_reg_only()); 543 x87_stack_.Fxch(src); 544 switch (opts) { 545 case kX87DoubleOperand: 546 __ fst_d(dst); 547 break; 548 case kX87FloatOperand: 549 __ fst_s(dst); 550 break; 551 case kX87IntOperand: 552 __ fist_s(dst); 553 break; 554 default: 555 UNREACHABLE(); 556 } 557 } 558 559 560 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { 561 DCHECK(is_mutable_); 562 if (Contains(reg)) { 563 Free(reg); 564 } 565 // Mark this register as the next register to write to 566 stack_[stack_depth_] = reg; 567 } 568 569 570 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { 571 DCHECK(is_mutable_); 572 // Assert the reg is prepared to write, but not on the virtual stack yet 573 DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) && 574 stack_depth_ < X87Register::kMaxNumAllocatableRegisters); 575 stack_depth_++; 576 } 577 578 579 void LCodeGen::X87PrepareBinaryOp( 580 X87Register left, X87Register right, X87Register result) { 581 // You need to use DefineSameAsFirst for x87 instructions 582 DCHECK(result.is(left)); 583 x87_stack_.Fxch(right, 1); 584 x87_stack_.Fxch(left); 585 } 586 587 588 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { 589 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { 590 bool double_inputs = instr->HasDoubleRegisterInput(); 591 592 // Flush stack from tos down, since FreeX87() will mess with tos 593 for (int i = stack_depth_-1; i >= 0; i--) { 594 X87Register reg = stack_[i]; 595 // Skip registers which contain the inputs for the next instruction 596 // when flushing the stack 597 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { 598 continue; 599 } 600 Free(reg); 601 if (i < stack_depth_-1) i++; 602 } 603 } 604 if (instr->IsReturn()) { 605 while (stack_depth_ > 0) { 606 __ fstp(0); 607 stack_depth_--; 608 } 609 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); 610 } 611 } 612 613 614 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr, 615 LCodeGen* cgen) { 616 // For going to a joined block, an explicit LClobberDoubles is inserted before 617 // LGoto. Because all used x87 registers are spilled to stack slots. The 618 // ResolvePhis phase of register allocator could guarantee the two input's x87 619 // stacks have the same layout. So don't check stack_depth_ <= 1 here. 620 int goto_block_id = goto_instr->block_id(); 621 if (current_block_id + 1 != goto_block_id) { 622 // If we have a value on the x87 stack on leaving a block, it must be a 623 // phi input. If the next block we compile is not the join block, we have 624 // to discard the stack state. 625 // Before discarding the stack state, we need to save it if the "goto block" 626 // has unreachable last predecessor when FLAG_unreachable_code_elimination. 
627 if (FLAG_unreachable_code_elimination) { 628 int length = goto_instr->block()->predecessors()->length(); 629 bool has_unreachable_last_predecessor = false; 630 for (int i = 0; i < length; i++) { 631 HBasicBlock* block = goto_instr->block()->predecessors()->at(i); 632 if (block->IsUnreachable() && 633 (block->block_id() + 1) == goto_block_id) { 634 has_unreachable_last_predecessor = true; 635 } 636 } 637 if (has_unreachable_last_predecessor) { 638 if (cgen->x87_stack_map_.find(goto_block_id) == 639 cgen->x87_stack_map_.end()) { 640 X87Stack* stack = new (cgen->zone()) X87Stack(*this); 641 cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack)); 642 } 643 } 644 } 645 646 // Discard the stack state. 647 stack_depth_ = 0; 648 } 649 } 650 651 652 void LCodeGen::EmitFlushX87ForDeopt() { 653 // The deoptimizer does not support X87 Registers. But as long as we 654 // deopt from a stub its not a problem, since we will re-materialize the 655 // original stub inputs, which can't be double registers. 656 // DCHECK(info()->IsStub()); 657 if (FLAG_debug_code && FLAG_enable_slow_asserts) { 658 __ pushfd(); 659 __ VerifyX87StackDepth(x87_stack_.depth()); 660 __ popfd(); 661 } 662 663 // Flush X87 stack in the deoptimizer entry. 664 } 665 666 667 Register LCodeGen::ToRegister(LOperand* op) const { 668 DCHECK(op->IsRegister()); 669 return ToRegister(op->index()); 670 } 671 672 673 X87Register LCodeGen::ToX87Register(LOperand* op) const { 674 DCHECK(op->IsDoubleRegister()); 675 return ToX87Register(op->index()); 676 } 677 678 679 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { 680 return ToRepresentation(op, Representation::Integer32()); 681 } 682 683 684 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, 685 const Representation& r) const { 686 HConstant* constant = chunk_->LookupConstant(op); 687 if (r.IsExternal()) { 688 return reinterpret_cast<int32_t>( 689 constant->ExternalReferenceValue().address()); 690 } 691 int32_t value = constant->Integer32Value(); 692 if (r.IsInteger32()) return value; 693 DCHECK(r.IsSmiOrTagged()); 694 return reinterpret_cast<int32_t>(Smi::FromInt(value)); 695 } 696 697 698 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { 699 HConstant* constant = chunk_->LookupConstant(op); 700 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); 701 return constant->handle(isolate()); 702 } 703 704 705 double LCodeGen::ToDouble(LConstantOperand* op) const { 706 HConstant* constant = chunk_->LookupConstant(op); 707 DCHECK(constant->HasDoubleValue()); 708 return constant->DoubleValue(); 709 } 710 711 712 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { 713 HConstant* constant = chunk_->LookupConstant(op); 714 DCHECK(constant->HasExternalReferenceValue()); 715 return constant->ExternalReferenceValue(); 716 } 717 718 719 bool LCodeGen::IsInteger32(LConstantOperand* op) const { 720 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); 721 } 722 723 724 bool LCodeGen::IsSmi(LConstantOperand* op) const { 725 return chunk_->LookupLiteralRepresentation(op).IsSmi(); 726 } 727 728 729 static int ArgumentsOffsetWithoutFrame(int index) { 730 DCHECK(index < 0); 731 return -(index + 1) * kPointerSize + kPCOnStackSize; 732 } 733 734 735 Operand LCodeGen::ToOperand(LOperand* op) const { 736 if (op->IsRegister()) return Operand(ToRegister(op)); 737 DCHECK(!op->IsDoubleRegister()); 738 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); 739 if (NeedsEagerFrame()) { 740 return Operand(ebp, 
FrameSlotToFPOffset(op->index())); 741 } else { 742 // Retrieve parameter without eager stack-frame relative to the 743 // stack-pointer. 744 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); 745 } 746 } 747 748 749 Operand LCodeGen::HighOperand(LOperand* op) { 750 DCHECK(op->IsDoubleStackSlot()); 751 if (NeedsEagerFrame()) { 752 return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize); 753 } else { 754 // Retrieve parameter without eager stack-frame relative to the 755 // stack-pointer. 756 return Operand( 757 esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); 758 } 759 } 760 761 762 void LCodeGen::WriteTranslation(LEnvironment* environment, 763 Translation* translation) { 764 if (environment == NULL) return; 765 766 // The translation includes one command per value in the environment. 767 int translation_size = environment->translation_size(); 768 769 WriteTranslation(environment->outer(), translation); 770 WriteTranslationFrame(environment, translation); 771 772 int object_index = 0; 773 int dematerialized_index = 0; 774 for (int i = 0; i < translation_size; ++i) { 775 LOperand* value = environment->values()->at(i); 776 AddToTranslation(environment, 777 translation, 778 value, 779 environment->HasTaggedValueAt(i), 780 environment->HasUint32ValueAt(i), 781 &object_index, 782 &dematerialized_index); 783 } 784 } 785 786 787 void LCodeGen::AddToTranslation(LEnvironment* environment, 788 Translation* translation, 789 LOperand* op, 790 bool is_tagged, 791 bool is_uint32, 792 int* object_index_pointer, 793 int* dematerialized_index_pointer) { 794 if (op == LEnvironment::materialization_marker()) { 795 int object_index = (*object_index_pointer)++; 796 if (environment->ObjectIsDuplicateAt(object_index)) { 797 int dupe_of = environment->ObjectDuplicateOfAt(object_index); 798 translation->DuplicateObject(dupe_of); 799 return; 800 } 801 int object_length = environment->ObjectLengthAt(object_index); 802 if (environment->ObjectIsArgumentsAt(object_index)) { 803 translation->BeginArgumentsObject(object_length); 804 } else { 805 translation->BeginCapturedObject(object_length); 806 } 807 int dematerialized_index = *dematerialized_index_pointer; 808 int env_offset = environment->translation_size() + dematerialized_index; 809 *dematerialized_index_pointer += object_length; 810 for (int i = 0; i < object_length; ++i) { 811 LOperand* value = environment->values()->at(env_offset + i); 812 AddToTranslation(environment, 813 translation, 814 value, 815 environment->HasTaggedValueAt(env_offset + i), 816 environment->HasUint32ValueAt(env_offset + i), 817 object_index_pointer, 818 dematerialized_index_pointer); 819 } 820 return; 821 } 822 823 if (op->IsStackSlot()) { 824 int index = op->index(); 825 if (is_tagged) { 826 translation->StoreStackSlot(index); 827 } else if (is_uint32) { 828 translation->StoreUint32StackSlot(index); 829 } else { 830 translation->StoreInt32StackSlot(index); 831 } 832 } else if (op->IsDoubleStackSlot()) { 833 int index = op->index(); 834 translation->StoreDoubleStackSlot(index); 835 } else if (op->IsRegister()) { 836 Register reg = ToRegister(op); 837 if (is_tagged) { 838 translation->StoreRegister(reg); 839 } else if (is_uint32) { 840 translation->StoreUint32Register(reg); 841 } else { 842 translation->StoreInt32Register(reg); 843 } 844 } else if (op->IsDoubleRegister()) { 845 X87Register reg = ToX87Register(op); 846 translation->StoreDoubleRegister(reg); 847 } else if (op->IsConstantOperand()) { 848 HConstant* constant = 
chunk()->LookupConstant(LConstantOperand::cast(op)); 849 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); 850 translation->StoreLiteral(src_index); 851 } else { 852 UNREACHABLE(); 853 } 854 } 855 856 857 void LCodeGen::CallCodeGeneric(Handle<Code> code, 858 RelocInfo::Mode mode, 859 LInstruction* instr, 860 SafepointMode safepoint_mode) { 861 DCHECK(instr != NULL); 862 __ call(code, mode); 863 RecordSafepointWithLazyDeopt(instr, safepoint_mode); 864 865 // Signal that we don't inline smi code before these stubs in the 866 // optimizing code generator. 867 if (code->kind() == Code::BINARY_OP_IC || 868 code->kind() == Code::COMPARE_IC) { 869 __ nop(); 870 } 871 } 872 873 874 void LCodeGen::CallCode(Handle<Code> code, 875 RelocInfo::Mode mode, 876 LInstruction* instr) { 877 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); 878 } 879 880 881 void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc, 882 LInstruction* instr, SaveFPRegsMode save_doubles) { 883 DCHECK(instr != NULL); 884 DCHECK(instr->HasPointerMap()); 885 886 __ CallRuntime(fun, argc, save_doubles); 887 888 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 889 890 DCHECK(info()->is_calling()); 891 } 892 893 894 void LCodeGen::LoadContextFromDeferred(LOperand* context) { 895 if (context->IsRegister()) { 896 if (!ToRegister(context).is(esi)) { 897 __ mov(esi, ToRegister(context)); 898 } 899 } else if (context->IsStackSlot()) { 900 __ mov(esi, ToOperand(context)); 901 } else if (context->IsConstantOperand()) { 902 HConstant* constant = 903 chunk_->LookupConstant(LConstantOperand::cast(context)); 904 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); 905 } else { 906 UNREACHABLE(); 907 } 908 } 909 910 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 911 int argc, 912 LInstruction* instr, 913 LOperand* context) { 914 LoadContextFromDeferred(context); 915 916 __ CallRuntimeSaveDoubles(id); 917 RecordSafepointWithRegisters( 918 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); 919 920 DCHECK(info()->is_calling()); 921 } 922 923 924 void LCodeGen::RegisterEnvironmentForDeoptimization( 925 LEnvironment* environment, Safepoint::DeoptMode mode) { 926 environment->set_has_been_used(); 927 if (!environment->HasBeenRegistered()) { 928 // Physical stack frame layout: 929 // -x ............. -4 0 ..................................... y 930 // [incoming arguments] [spill slots] [pushed outgoing arguments] 931 932 // Layout of the environment: 933 // 0 ..................................................... size-1 934 // [parameters] [locals] [expression stack including arguments] 935 936 // Layout of the translation: 937 // 0 ........................................................ size - 1 + 4 938 // [expression stack including arguments] [locals] [4 words] [parameters] 939 // |>------------ translation_size ------------<| 940 941 int frame_count = 0; 942 int jsframe_count = 0; 943 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { 944 ++frame_count; 945 if (e->frame_type() == JS_FUNCTION) { 946 ++jsframe_count; 947 } 948 } 949 Translation translation(&translations_, frame_count, jsframe_count, zone()); 950 WriteTranslation(environment, &translation); 951 int deoptimization_index = deoptimizations_.length(); 952 int pc_offset = masm()->pc_offset(); 953 environment->Register(deoptimization_index, 954 translation.index(), 955 (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); 956 deoptimizations_.Add(environment, zone()); 957 } 958 } 959 960 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, 961 DeoptimizeReason deopt_reason, 962 Deoptimizer::BailoutType bailout_type) { 963 LEnvironment* environment = instr->environment(); 964 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 965 DCHECK(environment->HasBeenRegistered()); 966 int id = environment->deoptimization_index(); 967 Address entry = 968 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 969 if (entry == NULL) { 970 Abort(kBailoutWasNotPrepared); 971 return; 972 } 973 974 if (DeoptEveryNTimes()) { 975 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); 976 Label no_deopt; 977 __ pushfd(); 978 __ push(eax); 979 __ mov(eax, Operand::StaticVariable(count)); 980 __ sub(eax, Immediate(1)); 981 __ j(not_zero, &no_deopt, Label::kNear); 982 if (FLAG_trap_on_deopt) __ int3(); 983 __ mov(eax, Immediate(FLAG_deopt_every_n_times)); 984 __ mov(Operand::StaticVariable(count), eax); 985 __ pop(eax); 986 __ popfd(); 987 DCHECK(frame_is_built_); 988 // Put the x87 stack layout in TOS. 989 if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); 990 __ push(Immediate(x87_stack_.GetLayout())); 991 __ fild_s(MemOperand(esp, 0)); 992 // Don't touch eflags. 993 __ lea(esp, Operand(esp, kPointerSize)); 994 __ call(entry, RelocInfo::RUNTIME_ENTRY); 995 __ bind(&no_deopt); 996 __ mov(Operand::StaticVariable(count), eax); 997 __ pop(eax); 998 __ popfd(); 999 } 1000 1001 // Put the x87 stack layout in TOS, so that we can save x87 fp registers in 1002 // the correct location. 1003 { 1004 Label done; 1005 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); 1006 if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); 1007 1008 int x87_stack_layout = x87_stack_.GetLayout(); 1009 __ push(Immediate(x87_stack_layout)); 1010 __ fild_s(MemOperand(esp, 0)); 1011 // Don't touch eflags. 1012 __ lea(esp, Operand(esp, kPointerSize)); 1013 __ bind(&done); 1014 } 1015 1016 if (info()->ShouldTrapOnDeopt()) { 1017 Label done; 1018 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); 1019 __ int3(); 1020 __ bind(&done); 1021 } 1022 1023 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id); 1024 1025 DCHECK(info()->IsStub() || frame_is_built_); 1026 if (cc == no_condition && frame_is_built_) { 1027 DeoptComment(deopt_info); 1028 __ call(entry, RelocInfo::RUNTIME_ENTRY); 1029 } else { 1030 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, 1031 !frame_is_built_); 1032 // We often have several deopts to the same entry, reuse the last 1033 // jump entry if this is the case. 1034 if (FLAG_trace_deopt || isolate()->is_profiling() || 1035 jump_table_.is_empty() || 1036 !table_entry.IsEquivalentTo(jump_table_.last())) { 1037 jump_table_.Add(table_entry, zone()); 1038 } 1039 if (cc == no_condition) { 1040 __ jmp(&jump_table_.last().label); 1041 } else { 1042 __ j(cc, &jump_table_.last().label); 1043 } 1044 } 1045 } 1046 1047 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, 1048 DeoptimizeReason deopt_reason) { 1049 Deoptimizer::BailoutType bailout_type = info()->IsStub() 1050 ? 
Deoptimizer::LAZY 1051 : Deoptimizer::EAGER; 1052 DeoptimizeIf(cc, instr, deopt_reason, bailout_type); 1053 } 1054 1055 1056 void LCodeGen::RecordSafepointWithLazyDeopt( 1057 LInstruction* instr, SafepointMode safepoint_mode) { 1058 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 1059 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); 1060 } else { 1061 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 1062 RecordSafepointWithRegisters( 1063 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 1064 } 1065 } 1066 1067 1068 void LCodeGen::RecordSafepoint( 1069 LPointerMap* pointers, 1070 Safepoint::Kind kind, 1071 int arguments, 1072 Safepoint::DeoptMode deopt_mode) { 1073 DCHECK(kind == expected_safepoint_kind_); 1074 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); 1075 Safepoint safepoint = 1076 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); 1077 for (int i = 0; i < operands->length(); i++) { 1078 LOperand* pointer = operands->at(i); 1079 if (pointer->IsStackSlot()) { 1080 safepoint.DefinePointerSlot(pointer->index(), zone()); 1081 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 1082 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); 1083 } 1084 } 1085 } 1086 1087 1088 void LCodeGen::RecordSafepoint(LPointerMap* pointers, 1089 Safepoint::DeoptMode mode) { 1090 RecordSafepoint(pointers, Safepoint::kSimple, 0, mode); 1091 } 1092 1093 1094 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) { 1095 LPointerMap empty_pointers(zone()); 1096 RecordSafepoint(&empty_pointers, mode); 1097 } 1098 1099 1100 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, 1101 int arguments, 1102 Safepoint::DeoptMode mode) { 1103 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode); 1104 } 1105 1106 1107 static const char* LabelType(LLabel* label) { 1108 if (label->is_loop_header()) return " (loop header)"; 1109 if (label->is_osr_entry()) return " (OSR entry)"; 1110 return ""; 1111 } 1112 1113 1114 void LCodeGen::DoLabel(LLabel* label) { 1115 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", 1116 current_instruction_, 1117 label->hydrogen_value()->id(), 1118 label->block_id(), 1119 LabelType(label)); 1120 __ bind(label->label()); 1121 current_block_ = label->block_id(); 1122 if (label->block()->predecessors()->length() > 1) { 1123 // A join block's x87 stack is that of its last visited predecessor. 1124 // If the last visited predecessor block is unreachable, the stack state 1125 // will be wrong. In such case, use the x87 stack of reachable predecessor. 1126 X87StackMap::const_iterator it = x87_stack_map_.find(current_block_); 1127 // Restore x87 stack. 1128 if (it != x87_stack_map_.end()) { 1129 x87_stack_ = *(it->second); 1130 } 1131 } 1132 DoGap(label); 1133 } 1134 1135 1136 void LCodeGen::DoParallelMove(LParallelMove* move) { 1137 resolver_.Resolve(move); 1138 } 1139 1140 1141 void LCodeGen::DoGap(LGap* gap) { 1142 for (int i = LGap::FIRST_INNER_POSITION; 1143 i <= LGap::LAST_INNER_POSITION; 1144 i++) { 1145 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); 1146 LParallelMove* move = gap->GetParallelMove(inner_pos); 1147 if (move != NULL) DoParallelMove(move); 1148 } 1149 } 1150 1151 1152 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { 1153 DoGap(instr); 1154 } 1155 1156 1157 void LCodeGen::DoParameter(LParameter* instr) { 1158 // Nothing to do. 
1159 } 1160 1161 1162 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { 1163 GenerateOsrPrologue(); 1164 } 1165 1166 1167 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { 1168 Register dividend = ToRegister(instr->dividend()); 1169 int32_t divisor = instr->divisor(); 1170 DCHECK(dividend.is(ToRegister(instr->result()))); 1171 1172 // Theoretically, a variation of the branch-free code for integer division by 1173 // a power of 2 (calculating the remainder via an additional multiplication 1174 // (which gets simplified to an 'and') and subtraction) should be faster, and 1175 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 1176 // indicate that positive dividends are heavily favored, so the branching 1177 // version performs better. 1178 HMod* hmod = instr->hydrogen(); 1179 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1180 Label dividend_is_not_negative, done; 1181 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1182 __ test(dividend, dividend); 1183 __ j(not_sign, ÷nd_is_not_negative, Label::kNear); 1184 // Note that this is correct even for kMinInt operands. 1185 __ neg(dividend); 1186 __ and_(dividend, mask); 1187 __ neg(dividend); 1188 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1189 DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); 1190 } 1191 __ jmp(&done, Label::kNear); 1192 } 1193 1194 __ bind(÷nd_is_not_negative); 1195 __ and_(dividend, mask); 1196 __ bind(&done); 1197 } 1198 1199 1200 void LCodeGen::DoModByConstI(LModByConstI* instr) { 1201 Register dividend = ToRegister(instr->dividend()); 1202 int32_t divisor = instr->divisor(); 1203 DCHECK(ToRegister(instr->result()).is(eax)); 1204 1205 if (divisor == 0) { 1206 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); 1207 return; 1208 } 1209 1210 __ TruncatingDiv(dividend, Abs(divisor)); 1211 __ imul(edx, edx, Abs(divisor)); 1212 __ mov(eax, dividend); 1213 __ sub(eax, edx); 1214 1215 // Check for negative zero. 1216 HMod* hmod = instr->hydrogen(); 1217 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1218 Label remainder_not_zero; 1219 __ j(not_zero, &remainder_not_zero, Label::kNear); 1220 __ cmp(dividend, Immediate(0)); 1221 DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero); 1222 __ bind(&remainder_not_zero); 1223 } 1224 } 1225 1226 1227 void LCodeGen::DoModI(LModI* instr) { 1228 HMod* hmod = instr->hydrogen(); 1229 1230 Register left_reg = ToRegister(instr->left()); 1231 DCHECK(left_reg.is(eax)); 1232 Register right_reg = ToRegister(instr->right()); 1233 DCHECK(!right_reg.is(eax)); 1234 DCHECK(!right_reg.is(edx)); 1235 Register result_reg = ToRegister(instr->result()); 1236 DCHECK(result_reg.is(edx)); 1237 1238 Label done; 1239 // Check for x % 0, idiv would signal a divide error. We have to 1240 // deopt in this case because we can't return a NaN. 1241 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1242 __ test(right_reg, Operand(right_reg)); 1243 DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); 1244 } 1245 1246 // Check for kMinInt % -1, idiv would signal a divide error. We 1247 // have to deopt if we care about -0, because we can't return that. 
1248 if (hmod->CheckFlag(HValue::kCanOverflow)) { 1249 Label no_overflow_possible; 1250 __ cmp(left_reg, kMinInt); 1251 __ j(not_equal, &no_overflow_possible, Label::kNear); 1252 __ cmp(right_reg, -1); 1253 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1254 DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero); 1255 } else { 1256 __ j(not_equal, &no_overflow_possible, Label::kNear); 1257 __ Move(result_reg, Immediate(0)); 1258 __ jmp(&done, Label::kNear); 1259 } 1260 __ bind(&no_overflow_possible); 1261 } 1262 1263 // Sign extend dividend in eax into edx:eax. 1264 __ cdq(); 1265 1266 // If we care about -0, test if the dividend is <0 and the result is 0. 1267 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1268 Label positive_left; 1269 __ test(left_reg, Operand(left_reg)); 1270 __ j(not_sign, &positive_left, Label::kNear); 1271 __ idiv(right_reg); 1272 __ test(result_reg, Operand(result_reg)); 1273 DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); 1274 __ jmp(&done, Label::kNear); 1275 __ bind(&positive_left); 1276 } 1277 __ idiv(right_reg); 1278 __ bind(&done); 1279 } 1280 1281 1282 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1283 Register dividend = ToRegister(instr->dividend()); 1284 int32_t divisor = instr->divisor(); 1285 Register result = ToRegister(instr->result()); 1286 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 1287 DCHECK(!result.is(dividend)); 1288 1289 // Check for (0 / -x) that will produce negative zero. 1290 HDiv* hdiv = instr->hydrogen(); 1291 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1292 __ test(dividend, dividend); 1293 DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); 1294 } 1295 // Check for (kMinInt / -1). 1296 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1297 __ cmp(dividend, kMinInt); 1298 DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); 1299 } 1300 // Deoptimize if remainder will not be 0. 1301 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1302 divisor != 1 && divisor != -1) { 1303 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1304 __ test(dividend, Immediate(mask)); 1305 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision); 1306 } 1307 __ Move(result, dividend); 1308 int32_t shift = WhichPowerOf2Abs(divisor); 1309 if (shift > 0) { 1310 // The arithmetic shift is always OK, the 'if' is an optimization only. 1311 if (shift > 1) __ sar(result, 31); 1312 __ shr(result, 32 - shift); 1313 __ add(result, dividend); 1314 __ sar(result, shift); 1315 } 1316 if (divisor < 0) __ neg(result); 1317 } 1318 1319 1320 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1321 Register dividend = ToRegister(instr->dividend()); 1322 int32_t divisor = instr->divisor(); 1323 DCHECK(ToRegister(instr->result()).is(edx)); 1324 1325 if (divisor == 0) { 1326 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); 1327 return; 1328 } 1329 1330 // Check for (0 / -x) that will produce negative zero. 
1331 HDiv* hdiv = instr->hydrogen(); 1332 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1333 __ test(dividend, dividend); 1334 DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); 1335 } 1336 1337 __ TruncatingDiv(dividend, Abs(divisor)); 1338 if (divisor < 0) __ neg(edx); 1339 1340 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1341 __ mov(eax, edx); 1342 __ imul(eax, eax, divisor); 1343 __ sub(eax, dividend); 1344 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); 1345 } 1346 } 1347 1348 1349 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1350 void LCodeGen::DoDivI(LDivI* instr) { 1351 HBinaryOperation* hdiv = instr->hydrogen(); 1352 Register dividend = ToRegister(instr->dividend()); 1353 Register divisor = ToRegister(instr->divisor()); 1354 Register remainder = ToRegister(instr->temp()); 1355 DCHECK(dividend.is(eax)); 1356 DCHECK(remainder.is(edx)); 1357 DCHECK(ToRegister(instr->result()).is(eax)); 1358 DCHECK(!divisor.is(eax)); 1359 DCHECK(!divisor.is(edx)); 1360 1361 // Check for x / 0. 1362 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1363 __ test(divisor, divisor); 1364 DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); 1365 } 1366 1367 // Check for (0 / -x) that will produce negative zero. 1368 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1369 Label dividend_not_zero; 1370 __ test(dividend, dividend); 1371 __ j(not_zero, ÷nd_not_zero, Label::kNear); 1372 __ test(divisor, divisor); 1373 DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); 1374 __ bind(÷nd_not_zero); 1375 } 1376 1377 // Check for (kMinInt / -1). 1378 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1379 Label dividend_not_min_int; 1380 __ cmp(dividend, kMinInt); 1381 __ j(not_zero, ÷nd_not_min_int, Label::kNear); 1382 __ cmp(divisor, -1); 1383 DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); 1384 __ bind(÷nd_not_min_int); 1385 } 1386 1387 // Sign extend to edx (= remainder). 1388 __ cdq(); 1389 __ idiv(divisor); 1390 1391 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1392 // Deoptimize if remainder is not 0. 1393 __ test(remainder, remainder); 1394 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision); 1395 } 1396 } 1397 1398 1399 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1400 Register dividend = ToRegister(instr->dividend()); 1401 int32_t divisor = instr->divisor(); 1402 DCHECK(dividend.is(ToRegister(instr->result()))); 1403 1404 // If the divisor is positive, things are easy: There can be no deopts and we 1405 // can simply do an arithmetic right shift. 1406 if (divisor == 1) return; 1407 int32_t shift = WhichPowerOf2Abs(divisor); 1408 if (divisor > 1) { 1409 __ sar(dividend, shift); 1410 return; 1411 } 1412 1413 // If the divisor is negative, we have to negate and handle edge cases. 1414 __ neg(dividend); 1415 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1416 DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); 1417 } 1418 1419 // Dividing by -1 is basically negation, unless we overflow. 1420 if (divisor == -1) { 1421 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1422 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 1423 } 1424 return; 1425 } 1426 1427 // If the negation could not overflow, simply shifting is OK. 
1428 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1429 __ sar(dividend, shift); 1430 return; 1431 } 1432 1433 Label not_kmin_int, done; 1434 __ j(no_overflow, ¬_kmin_int, Label::kNear); 1435 __ mov(dividend, Immediate(kMinInt / divisor)); 1436 __ jmp(&done, Label::kNear); 1437 __ bind(¬_kmin_int); 1438 __ sar(dividend, shift); 1439 __ bind(&done); 1440 } 1441 1442 1443 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1444 Register dividend = ToRegister(instr->dividend()); 1445 int32_t divisor = instr->divisor(); 1446 DCHECK(ToRegister(instr->result()).is(edx)); 1447 1448 if (divisor == 0) { 1449 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero); 1450 return; 1451 } 1452 1453 // Check for (0 / -x) that will produce negative zero. 1454 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1455 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1456 __ test(dividend, dividend); 1457 DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero); 1458 } 1459 1460 // Easy case: We need no dynamic check for the dividend and the flooring 1461 // division is the same as the truncating division. 1462 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1463 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1464 __ TruncatingDiv(dividend, Abs(divisor)); 1465 if (divisor < 0) __ neg(edx); 1466 return; 1467 } 1468 1469 // In the general case we may need to adjust before and after the truncating 1470 // division to get a flooring division. 1471 Register temp = ToRegister(instr->temp3()); 1472 DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx)); 1473 Label needs_adjustment, done; 1474 __ cmp(dividend, Immediate(0)); 1475 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear); 1476 __ TruncatingDiv(dividend, Abs(divisor)); 1477 if (divisor < 0) __ neg(edx); 1478 __ jmp(&done, Label::kNear); 1479 __ bind(&needs_adjustment); 1480 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1)); 1481 __ TruncatingDiv(temp, Abs(divisor)); 1482 if (divisor < 0) __ neg(edx); 1483 __ dec(edx); 1484 __ bind(&done); 1485 } 1486 1487 1488 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1489 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1490 HBinaryOperation* hdiv = instr->hydrogen(); 1491 Register dividend = ToRegister(instr->dividend()); 1492 Register divisor = ToRegister(instr->divisor()); 1493 Register remainder = ToRegister(instr->temp()); 1494 Register result = ToRegister(instr->result()); 1495 DCHECK(dividend.is(eax)); 1496 DCHECK(remainder.is(edx)); 1497 DCHECK(result.is(eax)); 1498 DCHECK(!divisor.is(eax)); 1499 DCHECK(!divisor.is(edx)); 1500 1501 // Check for x / 0. 1502 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1503 __ test(divisor, divisor); 1504 DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero); 1505 } 1506 1507 // Check for (0 / -x) that will produce negative zero. 1508 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1509 Label dividend_not_zero; 1510 __ test(dividend, dividend); 1511 __ j(not_zero, ÷nd_not_zero, Label::kNear); 1512 __ test(divisor, divisor); 1513 DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); 1514 __ bind(÷nd_not_zero); 1515 } 1516 1517 // Check for (kMinInt / -1). 
1518 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1519 Label dividend_not_min_int; 1520 __ cmp(dividend, kMinInt); 1521 __ j(not_zero, ÷nd_not_min_int, Label::kNear); 1522 __ cmp(divisor, -1); 1523 DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow); 1524 __ bind(÷nd_not_min_int); 1525 } 1526 1527 // Sign extend to edx (= remainder). 1528 __ cdq(); 1529 __ idiv(divisor); 1530 1531 Label done; 1532 __ test(remainder, remainder); 1533 __ j(zero, &done, Label::kNear); 1534 __ xor_(remainder, divisor); 1535 __ sar(remainder, 31); 1536 __ add(result, remainder); 1537 __ bind(&done); 1538 } 1539 1540 1541 void LCodeGen::DoMulI(LMulI* instr) { 1542 Register left = ToRegister(instr->left()); 1543 LOperand* right = instr->right(); 1544 1545 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1546 __ mov(ToRegister(instr->temp()), left); 1547 } 1548 1549 if (right->IsConstantOperand()) { 1550 // Try strength reductions on the multiplication. 1551 // All replacement instructions are at most as long as the imul 1552 // and have better latency. 1553 int constant = ToInteger32(LConstantOperand::cast(right)); 1554 if (constant == -1) { 1555 __ neg(left); 1556 } else if (constant == 0) { 1557 __ xor_(left, Operand(left)); 1558 } else if (constant == 2) { 1559 __ add(left, Operand(left)); 1560 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1561 // If we know that the multiplication can't overflow, it's safe to 1562 // use instructions that don't set the overflow flag for the 1563 // multiplication. 1564 switch (constant) { 1565 case 1: 1566 // Do nothing. 1567 break; 1568 case 3: 1569 __ lea(left, Operand(left, left, times_2, 0)); 1570 break; 1571 case 4: 1572 __ shl(left, 2); 1573 break; 1574 case 5: 1575 __ lea(left, Operand(left, left, times_4, 0)); 1576 break; 1577 case 8: 1578 __ shl(left, 3); 1579 break; 1580 case 9: 1581 __ lea(left, Operand(left, left, times_8, 0)); 1582 break; 1583 case 16: 1584 __ shl(left, 4); 1585 break; 1586 default: 1587 __ imul(left, left, constant); 1588 break; 1589 } 1590 } else { 1591 __ imul(left, left, constant); 1592 } 1593 } else { 1594 if (instr->hydrogen()->representation().IsSmi()) { 1595 __ SmiUntag(left); 1596 } 1597 __ imul(left, ToOperand(right)); 1598 } 1599 1600 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1601 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 1602 } 1603 1604 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1605 // Bail out if the result is supposed to be negative zero. 1606 Label done; 1607 __ test(left, Operand(left)); 1608 __ j(not_zero, &done); 1609 if (right->IsConstantOperand()) { 1610 if (ToInteger32(LConstantOperand::cast(right)) < 0) { 1611 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); 1612 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { 1613 __ cmp(ToRegister(instr->temp()), Immediate(0)); 1614 DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero); 1615 } 1616 } else { 1617 // Test the non-zero operand for negative sign. 
1618 __ or_(ToRegister(instr->temp()), ToOperand(right)); 1619 DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero); 1620 } 1621 __ bind(&done); 1622 } 1623 } 1624 1625 1626 void LCodeGen::DoBitI(LBitI* instr) { 1627 LOperand* left = instr->left(); 1628 LOperand* right = instr->right(); 1629 DCHECK(left->Equals(instr->result())); 1630 DCHECK(left->IsRegister()); 1631 1632 if (right->IsConstantOperand()) { 1633 int32_t right_operand = 1634 ToRepresentation(LConstantOperand::cast(right), 1635 instr->hydrogen()->representation()); 1636 switch (instr->op()) { 1637 case Token::BIT_AND: 1638 __ and_(ToRegister(left), right_operand); 1639 break; 1640 case Token::BIT_OR: 1641 __ or_(ToRegister(left), right_operand); 1642 break; 1643 case Token::BIT_XOR: 1644 if (right_operand == int32_t(~0)) { 1645 __ not_(ToRegister(left)); 1646 } else { 1647 __ xor_(ToRegister(left), right_operand); 1648 } 1649 break; 1650 default: 1651 UNREACHABLE(); 1652 break; 1653 } 1654 } else { 1655 switch (instr->op()) { 1656 case Token::BIT_AND: 1657 __ and_(ToRegister(left), ToOperand(right)); 1658 break; 1659 case Token::BIT_OR: 1660 __ or_(ToRegister(left), ToOperand(right)); 1661 break; 1662 case Token::BIT_XOR: 1663 __ xor_(ToRegister(left), ToOperand(right)); 1664 break; 1665 default: 1666 UNREACHABLE(); 1667 break; 1668 } 1669 } 1670 } 1671 1672 1673 void LCodeGen::DoShiftI(LShiftI* instr) { 1674 LOperand* left = instr->left(); 1675 LOperand* right = instr->right(); 1676 DCHECK(left->Equals(instr->result())); 1677 DCHECK(left->IsRegister()); 1678 if (right->IsRegister()) { 1679 DCHECK(ToRegister(right).is(ecx)); 1680 1681 switch (instr->op()) { 1682 case Token::ROR: 1683 __ ror_cl(ToRegister(left)); 1684 break; 1685 case Token::SAR: 1686 __ sar_cl(ToRegister(left)); 1687 break; 1688 case Token::SHR: 1689 __ shr_cl(ToRegister(left)); 1690 if (instr->can_deopt()) { 1691 __ test(ToRegister(left), ToRegister(left)); 1692 DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); 1693 } 1694 break; 1695 case Token::SHL: 1696 __ shl_cl(ToRegister(left)); 1697 break; 1698 default: 1699 UNREACHABLE(); 1700 break; 1701 } 1702 } else { 1703 int value = ToInteger32(LConstantOperand::cast(right)); 1704 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1705 switch (instr->op()) { 1706 case Token::ROR: 1707 if (shift_count == 0 && instr->can_deopt()) { 1708 __ test(ToRegister(left), ToRegister(left)); 1709 DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); 1710 } else { 1711 __ ror(ToRegister(left), shift_count); 1712 } 1713 break; 1714 case Token::SAR: 1715 if (shift_count != 0) { 1716 __ sar(ToRegister(left), shift_count); 1717 } 1718 break; 1719 case Token::SHR: 1720 if (shift_count != 0) { 1721 __ shr(ToRegister(left), shift_count); 1722 } else if (instr->can_deopt()) { 1723 __ test(ToRegister(left), ToRegister(left)); 1724 DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue); 1725 } 1726 break; 1727 case Token::SHL: 1728 if (shift_count != 0) { 1729 if (instr->hydrogen_value()->representation().IsSmi() && 1730 instr->can_deopt()) { 1731 if (shift_count != 1) { 1732 __ shl(ToRegister(left), shift_count - 1); 1733 } 1734 __ SmiTag(ToRegister(left)); 1735 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 1736 } else { 1737 __ shl(ToRegister(left), shift_count); 1738 } 1739 } 1740 break; 1741 default: 1742 UNREACHABLE(); 1743 break; 1744 } 1745 } 1746 } 1747 1748 1749 void LCodeGen::DoSubI(LSubI* instr) { 1750 LOperand* left = instr->left(); 1751 LOperand* right = instr->right(); 1752 
DCHECK(left->Equals(instr->result())); 1753 1754 if (right->IsConstantOperand()) { 1755 __ sub(ToOperand(left), 1756 ToImmediate(right, instr->hydrogen()->representation())); 1757 } else { 1758 __ sub(ToRegister(left), ToOperand(right)); 1759 } 1760 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1761 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 1762 } 1763 } 1764 1765 1766 void LCodeGen::DoConstantI(LConstantI* instr) { 1767 __ Move(ToRegister(instr->result()), Immediate(instr->value())); 1768 } 1769 1770 1771 void LCodeGen::DoConstantS(LConstantS* instr) { 1772 __ Move(ToRegister(instr->result()), Immediate(instr->value())); 1773 } 1774 1775 1776 void LCodeGen::DoConstantD(LConstantD* instr) { 1777 uint64_t const bits = instr->bits(); 1778 uint32_t const lower = static_cast<uint32_t>(bits); 1779 uint32_t const upper = static_cast<uint32_t>(bits >> 32); 1780 DCHECK(instr->result()->IsDoubleRegister()); 1781 1782 __ push(Immediate(upper)); 1783 __ push(Immediate(lower)); 1784 X87Register reg = ToX87Register(instr->result()); 1785 X87Mov(reg, Operand(esp, 0)); 1786 __ add(Operand(esp), Immediate(kDoubleSize)); 1787 } 1788 1789 1790 void LCodeGen::DoConstantE(LConstantE* instr) { 1791 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); 1792 } 1793 1794 1795 void LCodeGen::DoConstantT(LConstantT* instr) { 1796 Register reg = ToRegister(instr->result()); 1797 Handle<Object> object = instr->value(isolate()); 1798 AllowDeferredHandleDereference smi_check; 1799 __ LoadObject(reg, object); 1800 } 1801 1802 1803 Operand LCodeGen::BuildSeqStringOperand(Register string, 1804 LOperand* index, 1805 String::Encoding encoding) { 1806 if (index->IsConstantOperand()) { 1807 int offset = ToRepresentation(LConstantOperand::cast(index), 1808 Representation::Integer32()); 1809 if (encoding == String::TWO_BYTE_ENCODING) { 1810 offset *= kUC16Size; 1811 } 1812 STATIC_ASSERT(kCharSize == 1); 1813 return FieldOperand(string, SeqString::kHeaderSize + offset); 1814 } 1815 return FieldOperand( 1816 string, ToRegister(index), 1817 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, 1818 SeqString::kHeaderSize); 1819 } 1820 1821 1822 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1823 String::Encoding encoding = instr->hydrogen()->encoding(); 1824 Register result = ToRegister(instr->result()); 1825 Register string = ToRegister(instr->string()); 1826 1827 if (FLAG_debug_code) { 1828 __ push(string); 1829 __ mov(string, FieldOperand(string, HeapObject::kMapOffset)); 1830 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset)); 1831 1832 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); 1833 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1834 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1835 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING 1836 ? 
one_byte_seq_type : two_byte_seq_type)); 1837 __ Check(equal, kUnexpectedStringType); 1838 __ pop(string); 1839 } 1840 1841 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1842 if (encoding == String::ONE_BYTE_ENCODING) { 1843 __ movzx_b(result, operand); 1844 } else { 1845 __ movzx_w(result, operand); 1846 } 1847 } 1848 1849 1850 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1851 String::Encoding encoding = instr->hydrogen()->encoding(); 1852 Register string = ToRegister(instr->string()); 1853 1854 if (FLAG_debug_code) { 1855 Register value = ToRegister(instr->value()); 1856 Register index = ToRegister(instr->index()); 1857 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1858 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1859 int encoding_mask = 1860 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 1861 ? one_byte_seq_type : two_byte_seq_type; 1862 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 1863 } 1864 1865 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1866 if (instr->value()->IsConstantOperand()) { 1867 int value = ToRepresentation(LConstantOperand::cast(instr->value()), 1868 Representation::Integer32()); 1869 DCHECK_LE(0, value); 1870 if (encoding == String::ONE_BYTE_ENCODING) { 1871 DCHECK_LE(value, String::kMaxOneByteCharCode); 1872 __ mov_b(operand, static_cast<int8_t>(value)); 1873 } else { 1874 DCHECK_LE(value, String::kMaxUtf16CodeUnit); 1875 __ mov_w(operand, static_cast<int16_t>(value)); 1876 } 1877 } else { 1878 Register value = ToRegister(instr->value()); 1879 if (encoding == String::ONE_BYTE_ENCODING) { 1880 __ mov_b(operand, value); 1881 } else { 1882 __ mov_w(operand, value); 1883 } 1884 } 1885 } 1886 1887 1888 void LCodeGen::DoAddI(LAddI* instr) { 1889 LOperand* left = instr->left(); 1890 LOperand* right = instr->right(); 1891 1892 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { 1893 if (right->IsConstantOperand()) { 1894 int32_t offset = ToRepresentation(LConstantOperand::cast(right), 1895 instr->hydrogen()->representation()); 1896 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); 1897 } else { 1898 Operand address(ToRegister(left), ToRegister(right), times_1, 0); 1899 __ lea(ToRegister(instr->result()), address); 1900 } 1901 } else { 1902 if (right->IsConstantOperand()) { 1903 __ add(ToOperand(left), 1904 ToImmediate(right, instr->hydrogen()->representation())); 1905 } else { 1906 __ add(ToRegister(left), ToOperand(right)); 1907 } 1908 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1909 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 1910 } 1911 } 1912 } 1913 1914 1915 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1916 LOperand* left = instr->left(); 1917 LOperand* right = instr->right(); 1918 DCHECK(left->Equals(instr->result())); 1919 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1920 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1921 Label return_left; 1922 Condition condition = (operation == HMathMinMax::kMathMin) 1923 ? 
less_equal 1924 : greater_equal; 1925 if (right->IsConstantOperand()) { 1926 Operand left_op = ToOperand(left); 1927 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), 1928 instr->hydrogen()->representation()); 1929 __ cmp(left_op, immediate); 1930 __ j(condition, &return_left, Label::kNear); 1931 __ mov(left_op, immediate); 1932 } else { 1933 Register left_reg = ToRegister(left); 1934 Operand right_op = ToOperand(right); 1935 __ cmp(left_reg, right_op); 1936 __ j(condition, &return_left, Label::kNear); 1937 __ mov(left_reg, right_op); 1938 } 1939 __ bind(&return_left); 1940 } else { 1941 DCHECK(instr->hydrogen()->representation().IsDouble()); 1942 Label check_nan_left, check_zero, return_left, return_right; 1943 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; 1944 X87Register left_reg = ToX87Register(left); 1945 X87Register right_reg = ToX87Register(right); 1946 1947 X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result())); 1948 __ fld(1); 1949 __ fld(1); 1950 __ FCmp(); 1951 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. 1952 __ j(equal, &check_zero, Label::kNear); // left == right. 1953 __ j(condition, &return_left, Label::kNear); 1954 __ jmp(&return_right, Label::kNear); 1955 1956 __ bind(&check_zero); 1957 __ fld(0); 1958 __ fldz(); 1959 __ FCmp(); 1960 __ j(not_equal, &return_left, Label::kNear); // left == right != 0. 1961 // At this point, both left and right are either 0 or -0. 1962 if (operation == HMathMinMax::kMathMin) { 1963 // Push st0 and st1 to stack, then pop them to temp registers and OR them, 1964 // load it to left. 1965 Register scratch_reg = ToRegister(instr->temp()); 1966 __ fld(1); 1967 __ fld(1); 1968 __ sub(esp, Immediate(2 * kPointerSize)); 1969 __ fstp_s(MemOperand(esp, 0)); 1970 __ fstp_s(MemOperand(esp, kPointerSize)); 1971 __ pop(scratch_reg); 1972 __ or_(MemOperand(esp, 0), scratch_reg); 1973 X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand); 1974 __ pop(scratch_reg); // restore esp 1975 } else { 1976 // Since we operate on +0 and/or -0, addsd and andsd have the same effect. 1977 // Should put the result in stX0 1978 __ fadd_i(1); 1979 } 1980 __ jmp(&return_left, Label::kNear); 1981 1982 __ bind(&check_nan_left); 1983 __ fld(0); 1984 __ fld(0); 1985 __ FCmp(); // NaN check. 1986 __ j(parity_even, &return_left, Label::kNear); // left == NaN. 1987 1988 __ bind(&return_right); 1989 X87Mov(left_reg, right_reg); 1990 1991 __ bind(&return_left); 1992 } 1993 } 1994 1995 1996 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1997 X87Register left = ToX87Register(instr->left()); 1998 X87Register right = ToX87Register(instr->right()); 1999 X87Register result = ToX87Register(instr->result()); 2000 if (instr->op() != Token::MOD) { 2001 X87PrepareBinaryOp(left, right, result); 2002 } 2003 // Set the precision control to double-precision. 2004 __ X87SetFPUCW(0x027F); 2005 switch (instr->op()) { 2006 case Token::ADD: 2007 __ fadd_i(1); 2008 break; 2009 case Token::SUB: 2010 __ fsub_i(1); 2011 break; 2012 case Token::MUL: 2013 __ fmul_i(1); 2014 break; 2015 case Token::DIV: 2016 __ fdiv_i(1); 2017 break; 2018 case Token::MOD: { 2019 // Pass two doubles as arguments on the stack. 
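      // On ia32 a double takes two of the word-sized argument slots, so the
      // four slots reserved by PrepareCallCFunction(4, ...) hold exactly the
      // two operands: left at Operand(esp, 0), right at Operand(esp, kDoubleSize).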
2020 __ PrepareCallCFunction(4, eax); 2021 X87Mov(Operand(esp, 1 * kDoubleSize), right); 2022 X87Mov(Operand(esp, 0), left); 2023 X87Free(right); 2024 DCHECK(left.is(result)); 2025 X87PrepareToWrite(result); 2026 __ CallCFunction( 2027 ExternalReference::mod_two_doubles_operation(isolate()), 2028 4); 2029 2030 // Return value is in st(0) on ia32. 2031 X87CommitWrite(result); 2032 break; 2033 } 2034 default: 2035 UNREACHABLE(); 2036 break; 2037 } 2038 2039 // Restore the default value of control word. 2040 __ X87SetFPUCW(0x037F); 2041 } 2042 2043 2044 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2045 DCHECK(ToRegister(instr->context()).is(esi)); 2046 DCHECK(ToRegister(instr->left()).is(edx)); 2047 DCHECK(ToRegister(instr->right()).is(eax)); 2048 DCHECK(ToRegister(instr->result()).is(eax)); 2049 2050 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code(); 2051 CallCode(code, RelocInfo::CODE_TARGET, instr); 2052 } 2053 2054 2055 template<class InstrType> 2056 void LCodeGen::EmitBranch(InstrType instr, Condition cc) { 2057 int left_block = instr->TrueDestination(chunk_); 2058 int right_block = instr->FalseDestination(chunk_); 2059 2060 int next_block = GetNextEmittedBlock(); 2061 2062 if (right_block == left_block || cc == no_condition) { 2063 EmitGoto(left_block); 2064 } else if (left_block == next_block) { 2065 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); 2066 } else if (right_block == next_block) { 2067 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 2068 } else { 2069 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 2070 __ jmp(chunk_->GetAssemblyLabel(right_block)); 2071 } 2072 } 2073 2074 2075 template <class InstrType> 2076 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) { 2077 int true_block = instr->TrueDestination(chunk_); 2078 if (cc == no_condition) { 2079 __ jmp(chunk_->GetAssemblyLabel(true_block)); 2080 } else { 2081 __ j(cc, chunk_->GetAssemblyLabel(true_block)); 2082 } 2083 } 2084 2085 2086 template<class InstrType> 2087 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { 2088 int false_block = instr->FalseDestination(chunk_); 2089 if (cc == no_condition) { 2090 __ jmp(chunk_->GetAssemblyLabel(false_block)); 2091 } else { 2092 __ j(cc, chunk_->GetAssemblyLabel(false_block)); 2093 } 2094 } 2095 2096 2097 void LCodeGen::DoBranch(LBranch* instr) { 2098 Representation r = instr->hydrogen()->value()->representation(); 2099 if (r.IsSmiOrInteger32()) { 2100 Register reg = ToRegister(instr->value()); 2101 __ test(reg, Operand(reg)); 2102 EmitBranch(instr, not_zero); 2103 } else if (r.IsDouble()) { 2104 X87Register reg = ToX87Register(instr->value()); 2105 X87LoadForUsage(reg); 2106 __ fldz(); 2107 __ FCmp(); 2108 EmitBranch(instr, not_zero); 2109 } else { 2110 DCHECK(r.IsTagged()); 2111 Register reg = ToRegister(instr->value()); 2112 HType type = instr->hydrogen()->value()->type(); 2113 if (type.IsBoolean()) { 2114 DCHECK(!info()->IsStub()); 2115 __ cmp(reg, factory()->true_value()); 2116 EmitBranch(instr, equal); 2117 } else if (type.IsSmi()) { 2118 DCHECK(!info()->IsStub()); 2119 __ test(reg, Operand(reg)); 2120 EmitBranch(instr, not_equal); 2121 } else if (type.IsJSArray()) { 2122 DCHECK(!info()->IsStub()); 2123 EmitBranch(instr, no_condition); 2124 } else if (type.IsHeapNumber()) { 2125 UNREACHABLE(); 2126 } else if (type.IsString()) { 2127 DCHECK(!info()->IsStub()); 2128 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); 2129 EmitBranch(instr, not_equal); 2130 } else { 2131 ToBooleanHints 
expected = instr->hydrogen()->expected_input_types();
2132       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
2133
2134       if (expected & ToBooleanHint::kUndefined) {
2135         // undefined -> false.
2136         __ cmp(reg, factory()->undefined_value());
2137         __ j(equal, instr->FalseLabel(chunk_));
2138       }
2139       if (expected & ToBooleanHint::kBoolean) {
2140         // true -> true.
2141         __ cmp(reg, factory()->true_value());
2142         __ j(equal, instr->TrueLabel(chunk_));
2143         // false -> false.
2144         __ cmp(reg, factory()->false_value());
2145         __ j(equal, instr->FalseLabel(chunk_));
2146       }
2147       if (expected & ToBooleanHint::kNull) {
2148         // 'null' -> false.
2149         __ cmp(reg, factory()->null_value());
2150         __ j(equal, instr->FalseLabel(chunk_));
2151       }
2152
2153       if (expected & ToBooleanHint::kSmallInteger) {
2154         // Smis: 0 -> false, all other -> true.
2155         __ test(reg, Operand(reg));
2156         __ j(equal, instr->FalseLabel(chunk_));
2157         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2158       } else if (expected & ToBooleanHint::kNeedsMap) {
2159         // If we need a map later and have a Smi -> deopt.
2160         __ test(reg, Immediate(kSmiTagMask));
2161         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
2162       }
2163
2164       Register map = no_reg;  // Keep the compiler happy.
2165       if (expected & ToBooleanHint::kNeedsMap) {
2166         map = ToRegister(instr->temp());
2167         DCHECK(!map.is(reg));
2168         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2169
2170         if (expected & ToBooleanHint::kCanBeUndetectable) {
2171           // Undetectable -> false.
2172           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2173                     Immediate(1 << Map::kIsUndetectable));
2174           __ j(not_zero, instr->FalseLabel(chunk_));
2175         }
2176       }
2177
2178       if (expected & ToBooleanHint::kReceiver) {
2179         // spec object -> true.
2180         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
2181         __ j(above_equal, instr->TrueLabel(chunk_));
2182       }
2183
2184       if (expected & ToBooleanHint::kString) {
2185         // String value -> false iff empty.
2186         Label not_string;
2187         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2188         __ j(above_equal, &not_string, Label::kNear);
2189         __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2190         __ j(not_zero, instr->TrueLabel(chunk_));
2191         __ jmp(instr->FalseLabel(chunk_));
2192         __ bind(&not_string);
2193       }
2194
2195       if (expected & ToBooleanHint::kSymbol) {
2196         // Symbol value -> true.
2197         __ CmpInstanceType(map, SYMBOL_TYPE);
2198         __ j(equal, instr->TrueLabel(chunk_));
2199       }
2200
2201       if (expected & ToBooleanHint::kHeapNumber) {
2202         // heap number -> false iff +0, -0, or NaN.
2203         Label not_heap_number;
2204         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2205                factory()->heap_number_map());
2206         __ j(not_equal, &not_heap_number, Label::kNear);
2207         __ fldz();
2208         __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2209         __ FCmp();
2210         __ j(zero, instr->FalseLabel(chunk_));
2211         __ jmp(instr->TrueLabel(chunk_));
2212         __ bind(&not_heap_number);
2213       }
2214
2215       if (expected != ToBooleanHint::kAny) {
2216         // We've seen something for the first time -> deopt.
2217         // This can only happen if we are not generic already.
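      // no_condition turns this into an unconditional bailout.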
2218 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject); 2219 } 2220 } 2221 } 2222 } 2223 2224 2225 void LCodeGen::EmitGoto(int block) { 2226 if (!IsNextEmittedBlock(block)) { 2227 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2228 } 2229 } 2230 2231 2232 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { 2233 } 2234 2235 2236 void LCodeGen::DoGoto(LGoto* instr) { 2237 EmitGoto(instr->block_id()); 2238 } 2239 2240 2241 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2242 Condition cond = no_condition; 2243 switch (op) { 2244 case Token::EQ: 2245 case Token::EQ_STRICT: 2246 cond = equal; 2247 break; 2248 case Token::NE: 2249 case Token::NE_STRICT: 2250 cond = not_equal; 2251 break; 2252 case Token::LT: 2253 cond = is_unsigned ? below : less; 2254 break; 2255 case Token::GT: 2256 cond = is_unsigned ? above : greater; 2257 break; 2258 case Token::LTE: 2259 cond = is_unsigned ? below_equal : less_equal; 2260 break; 2261 case Token::GTE: 2262 cond = is_unsigned ? above_equal : greater_equal; 2263 break; 2264 case Token::IN: 2265 case Token::INSTANCEOF: 2266 default: 2267 UNREACHABLE(); 2268 } 2269 return cond; 2270 } 2271 2272 2273 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2274 LOperand* left = instr->left(); 2275 LOperand* right = instr->right(); 2276 bool is_unsigned = 2277 instr->is_double() || 2278 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2279 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2280 Condition cc = TokenToCondition(instr->op(), is_unsigned); 2281 2282 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2283 // We can statically evaluate the comparison. 2284 double left_val = ToDouble(LConstantOperand::cast(left)); 2285 double right_val = ToDouble(LConstantOperand::cast(right)); 2286 int next_block = Token::EvalComparison(instr->op(), left_val, right_val) 2287 ? instr->TrueDestination(chunk_) 2288 : instr->FalseDestination(chunk_); 2289 EmitGoto(next_block); 2290 } else { 2291 if (instr->is_double()) { 2292 X87LoadForUsage(ToX87Register(right), ToX87Register(left)); 2293 __ FCmp(); 2294 // Don't base result on EFLAGS when a NaN is involved. Instead 2295 // jump to the false block. 2296 __ j(parity_even, instr->FalseLabel(chunk_)); 2297 } else { 2298 if (right->IsConstantOperand()) { 2299 __ cmp(ToOperand(left), 2300 ToImmediate(right, instr->hydrogen()->representation())); 2301 } else if (left->IsConstantOperand()) { 2302 __ cmp(ToOperand(right), 2303 ToImmediate(left, instr->hydrogen()->representation())); 2304 // We commuted the operands, so commute the condition. 
2305 cc = CommuteCondition(cc); 2306 } else { 2307 __ cmp(ToRegister(left), ToOperand(right)); 2308 } 2309 } 2310 EmitBranch(instr, cc); 2311 } 2312 } 2313 2314 2315 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2316 Register left = ToRegister(instr->left()); 2317 2318 if (instr->right()->IsConstantOperand()) { 2319 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right())); 2320 __ CmpObject(left, right); 2321 } else { 2322 Operand right = ToOperand(instr->right()); 2323 __ cmp(left, right); 2324 } 2325 EmitBranch(instr, equal); 2326 } 2327 2328 2329 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2330 if (instr->hydrogen()->representation().IsTagged()) { 2331 Register input_reg = ToRegister(instr->object()); 2332 __ cmp(input_reg, factory()->the_hole_value()); 2333 EmitBranch(instr, equal); 2334 return; 2335 } 2336 2337 // Put the value to the top of stack 2338 X87Register src = ToX87Register(instr->object()); 2339 X87LoadForUsage(src); 2340 __ fld(0); 2341 __ fld(0); 2342 __ FCmp(); 2343 Label ok; 2344 __ j(parity_even, &ok, Label::kNear); 2345 __ fstp(0); 2346 EmitFalseBranch(instr, no_condition); 2347 __ bind(&ok); 2348 2349 2350 __ sub(esp, Immediate(kDoubleSize)); 2351 __ fstp_d(MemOperand(esp, 0)); 2352 2353 __ add(esp, Immediate(kDoubleSize)); 2354 int offset = sizeof(kHoleNanUpper32); 2355 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); 2356 EmitBranch(instr, equal); 2357 } 2358 2359 2360 Condition LCodeGen::EmitIsString(Register input, 2361 Register temp1, 2362 Label* is_not_string, 2363 SmiCheck check_needed = INLINE_SMI_CHECK) { 2364 if (check_needed == INLINE_SMI_CHECK) { 2365 __ JumpIfSmi(input, is_not_string); 2366 } 2367 2368 Condition cond = masm_->IsObjectStringType(input, temp1, temp1); 2369 2370 return cond; 2371 } 2372 2373 2374 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2375 Register reg = ToRegister(instr->value()); 2376 Register temp = ToRegister(instr->temp()); 2377 2378 SmiCheck check_needed = 2379 instr->hydrogen()->value()->type().IsHeapObject() 2380 ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2381 2382 Condition true_cond = EmitIsString( 2383 reg, temp, instr->FalseLabel(chunk_), check_needed); 2384 2385 EmitBranch(instr, true_cond); 2386 } 2387 2388 2389 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2390 Operand input = ToOperand(instr->value()); 2391 2392 __ test(input, Immediate(kSmiTagMask)); 2393 EmitBranch(instr, zero); 2394 } 2395 2396 2397 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2398 Register input = ToRegister(instr->value()); 2399 Register temp = ToRegister(instr->temp()); 2400 2401 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2402 STATIC_ASSERT(kSmiTag == 0); 2403 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2404 } 2405 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); 2406 __ test_b(FieldOperand(temp, Map::kBitFieldOffset), 2407 Immediate(1 << Map::kIsUndetectable)); 2408 EmitBranch(instr, not_zero); 2409 } 2410 2411 2412 static Condition ComputeCompareCondition(Token::Value op) { 2413 switch (op) { 2414 case Token::EQ_STRICT: 2415 case Token::EQ: 2416 return equal; 2417 case Token::LT: 2418 return less; 2419 case Token::GT: 2420 return greater; 2421 case Token::LTE: 2422 return less_equal; 2423 case Token::GTE: 2424 return greater_equal; 2425 default: 2426 UNREACHABLE(); 2427 return no_condition; 2428 } 2429 } 2430 2431 2432 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2433 DCHECK(ToRegister(instr->context()).is(esi)); 2434 DCHECK(ToRegister(instr->left()).is(edx)); 2435 DCHECK(ToRegister(instr->right()).is(eax)); 2436 2437 Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code(); 2438 CallCode(code, RelocInfo::CODE_TARGET, instr); 2439 __ CompareRoot(eax, Heap::kTrueValueRootIndex); 2440 EmitBranch(instr, equal); 2441 } 2442 2443 2444 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2445 InstanceType from = instr->from(); 2446 InstanceType to = instr->to(); 2447 if (from == FIRST_TYPE) return to; 2448 DCHECK(from == to || to == LAST_TYPE); 2449 return from; 2450 } 2451 2452 2453 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2454 InstanceType from = instr->from(); 2455 InstanceType to = instr->to(); 2456 if (from == to) return equal; 2457 if (to == LAST_TYPE) return above_equal; 2458 if (from == FIRST_TYPE) return below_equal; 2459 UNREACHABLE(); 2460 return equal; 2461 } 2462 2463 2464 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2465 Register input = ToRegister(instr->value()); 2466 Register temp = ToRegister(instr->temp()); 2467 2468 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2469 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2470 } 2471 2472 __ CmpObjectType(input, TestType(instr->hydrogen()), temp); 2473 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2474 } 2475 2476 // Branches to a label or falls through with the answer in the z flag. Trashes 2477 // the temp registers, but not the input. 
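// Callers (see DoClassOfTestAndBranch below) branch on the equal condition
// afterwards.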
2478 void LCodeGen::EmitClassOfTest(Label* is_true, 2479 Label* is_false, 2480 Handle<String>class_name, 2481 Register input, 2482 Register temp, 2483 Register temp2) { 2484 DCHECK(!input.is(temp)); 2485 DCHECK(!input.is(temp2)); 2486 DCHECK(!temp.is(temp2)); 2487 __ JumpIfSmi(input, is_false); 2488 2489 __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp); 2490 STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); 2491 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { 2492 __ j(above_equal, is_true); 2493 } else { 2494 __ j(above_equal, is_false); 2495 } 2496 2497 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. 2498 // Check if the constructor in the map is a function. 2499 __ GetMapConstructor(temp, temp, temp2); 2500 // Objects with a non-function constructor have class 'Object'. 2501 __ CmpInstanceType(temp2, JS_FUNCTION_TYPE); 2502 if (String::Equals(class_name, isolate()->factory()->Object_string())) { 2503 __ j(not_equal, is_true); 2504 } else { 2505 __ j(not_equal, is_false); 2506 } 2507 2508 // temp now contains the constructor function. Grab the 2509 // instance class name from there. 2510 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2511 __ mov(temp, FieldOperand(temp, 2512 SharedFunctionInfo::kInstanceClassNameOffset)); 2513 // The class name we are testing against is internalized since it's a literal. 2514 // The name in the constructor is internalized because of the way the context 2515 // is booted. This routine isn't expected to work for random API-created 2516 // classes and it doesn't have to because you can't access it with natives 2517 // syntax. Since both sides are internalized it is sufficient to use an 2518 // identity comparison. 2519 __ cmp(temp, class_name); 2520 // End with the answer in the z flag. 2521 } 2522 2523 2524 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2525 Register input = ToRegister(instr->value()); 2526 Register temp = ToRegister(instr->temp()); 2527 Register temp2 = ToRegister(instr->temp2()); 2528 2529 Handle<String> class_name = instr->hydrogen()->class_name(); 2530 2531 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2532 class_name, input, temp, temp2); 2533 2534 EmitBranch(instr, equal); 2535 } 2536 2537 2538 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2539 Register reg = ToRegister(instr->value()); 2540 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); 2541 EmitBranch(instr, equal); 2542 } 2543 2544 2545 void LCodeGen::DoHasInPrototypeChainAndBranch( 2546 LHasInPrototypeChainAndBranch* instr) { 2547 Register const object = ToRegister(instr->object()); 2548 Register const object_map = ToRegister(instr->scratch()); 2549 Register const object_prototype = object_map; 2550 Register const prototype = ToRegister(instr->prototype()); 2551 2552 // The {object} must be a spec object. It's sufficient to know that {object} 2553 // is not a smi, since all other non-spec objects have {null} prototypes and 2554 // will be ruled out below. 2555 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { 2556 __ test(object, Immediate(kSmiTagMask)); 2557 EmitFalseBranch(instr, zero); 2558 } 2559 2560 // Loop through the {object}s prototype chain looking for the {prototype}. 2561 __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset)); 2562 Label loop; 2563 __ bind(&loop); 2564 2565 // Deoptimize if the object needs to be access checked. 
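  // Both access-checked objects and proxies need generic handling, so bail
  // out to unoptimized code in either case.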
2566 __ test_b(FieldOperand(object_map, Map::kBitFieldOffset), 2567 Immediate(1 << Map::kIsAccessCheckNeeded)); 2568 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck); 2569 // Deoptimize for proxies. 2570 __ CmpInstanceType(object_map, JS_PROXY_TYPE); 2571 DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy); 2572 2573 __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset)); 2574 __ cmp(object_prototype, factory()->null_value()); 2575 EmitFalseBranch(instr, equal); 2576 __ cmp(object_prototype, prototype); 2577 EmitTrueBranch(instr, equal); 2578 __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset)); 2579 __ jmp(&loop); 2580 } 2581 2582 2583 void LCodeGen::DoCmpT(LCmpT* instr) { 2584 Token::Value op = instr->op(); 2585 2586 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2587 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2588 2589 Condition condition = ComputeCompareCondition(op); 2590 Label true_value, done; 2591 __ test(eax, Operand(eax)); 2592 __ j(condition, &true_value, Label::kNear); 2593 __ mov(ToRegister(instr->result()), factory()->false_value()); 2594 __ jmp(&done, Label::kNear); 2595 __ bind(&true_value); 2596 __ mov(ToRegister(instr->result()), factory()->true_value()); 2597 __ bind(&done); 2598 } 2599 2600 void LCodeGen::EmitReturn(LReturn* instr) { 2601 int extra_value_count = 1; 2602 2603 if (instr->has_constant_parameter_count()) { 2604 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2605 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); 2606 } else { 2607 DCHECK(info()->IsStub()); // Functions would need to drop one more value. 2608 Register reg = ToRegister(instr->parameter_count()); 2609 // The argument count parameter is a smi 2610 __ SmiUntag(reg); 2611 Register return_addr_reg = reg.is(ecx) ? ebx : ecx; 2612 2613 // emit code to restore stack based on instr->parameter_count() 2614 __ pop(return_addr_reg); // save return address 2615 __ shl(reg, kPointerSizeLog2); 2616 __ add(esp, reg); 2617 __ jmp(return_addr_reg); 2618 } 2619 } 2620 2621 2622 void LCodeGen::DoReturn(LReturn* instr) { 2623 if (FLAG_trace && info()->IsOptimizing()) { 2624 // Preserve the return value on the stack and rely on the runtime call 2625 // to return the value in the same register. We're leaving the code 2626 // managed by the register allocator and tearing down the frame, it's 2627 // safe to write to the context register. 
2628 __ push(eax); 2629 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2630 __ CallRuntime(Runtime::kTraceExit); 2631 } 2632 if (NeedsEagerFrame()) { 2633 __ mov(esp, ebp); 2634 __ pop(ebp); 2635 } 2636 2637 EmitReturn(instr); 2638 } 2639 2640 2641 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2642 Register context = ToRegister(instr->context()); 2643 Register result = ToRegister(instr->result()); 2644 __ mov(result, ContextOperand(context, instr->slot_index())); 2645 2646 if (instr->hydrogen()->RequiresHoleCheck()) { 2647 __ cmp(result, factory()->the_hole_value()); 2648 if (instr->hydrogen()->DeoptimizesOnHole()) { 2649 DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); 2650 } else { 2651 Label is_not_hole; 2652 __ j(not_equal, &is_not_hole, Label::kNear); 2653 __ mov(result, factory()->undefined_value()); 2654 __ bind(&is_not_hole); 2655 } 2656 } 2657 } 2658 2659 2660 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2661 Register context = ToRegister(instr->context()); 2662 Register value = ToRegister(instr->value()); 2663 2664 Label skip_assignment; 2665 2666 Operand target = ContextOperand(context, instr->slot_index()); 2667 if (instr->hydrogen()->RequiresHoleCheck()) { 2668 __ cmp(target, factory()->the_hole_value()); 2669 if (instr->hydrogen()->DeoptimizesOnHole()) { 2670 DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); 2671 } else { 2672 __ j(not_equal, &skip_assignment, Label::kNear); 2673 } 2674 } 2675 2676 __ mov(target, value); 2677 if (instr->hydrogen()->NeedsWriteBarrier()) { 2678 SmiCheck check_needed = 2679 instr->hydrogen()->value()->type().IsHeapObject() 2680 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2681 Register temp = ToRegister(instr->temp()); 2682 int offset = Context::SlotOffset(instr->slot_index()); 2683 __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs, 2684 EMIT_REMEMBERED_SET, check_needed); 2685 } 2686 2687 __ bind(&skip_assignment); 2688 } 2689 2690 2691 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2692 HObjectAccess access = instr->hydrogen()->access(); 2693 int offset = access.offset(); 2694 2695 if (access.IsExternalMemory()) { 2696 Register result = ToRegister(instr->result()); 2697 MemOperand operand = instr->object()->IsConstantOperand() 2698 ? 
MemOperand::StaticVariable(ToExternalReference( 2699 LConstantOperand::cast(instr->object()))) 2700 : MemOperand(ToRegister(instr->object()), offset); 2701 __ Load(result, operand, access.representation()); 2702 return; 2703 } 2704 2705 Register object = ToRegister(instr->object()); 2706 if (instr->hydrogen()->representation().IsDouble()) { 2707 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); 2708 return; 2709 } 2710 2711 Register result = ToRegister(instr->result()); 2712 if (!access.IsInobject()) { 2713 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); 2714 object = result; 2715 } 2716 __ Load(result, FieldOperand(object, offset), access.representation()); 2717 } 2718 2719 2720 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { 2721 DCHECK(!operand->IsDoubleRegister()); 2722 if (operand->IsConstantOperand()) { 2723 Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); 2724 AllowDeferredHandleDereference smi_check; 2725 if (object->IsSmi()) { 2726 __ Push(Handle<Smi>::cast(object)); 2727 } else { 2728 __ PushHeapObject(Handle<HeapObject>::cast(object)); 2729 } 2730 } else if (operand->IsRegister()) { 2731 __ push(ToRegister(operand)); 2732 } else { 2733 __ push(ToOperand(operand)); 2734 } 2735 } 2736 2737 2738 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 2739 Register function = ToRegister(instr->function()); 2740 Register temp = ToRegister(instr->temp()); 2741 Register result = ToRegister(instr->result()); 2742 2743 // Get the prototype or initial map from the function. 2744 __ mov(result, 2745 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2746 2747 // Check that the function has a prototype or an initial map. 2748 __ cmp(Operand(result), Immediate(factory()->the_hole_value())); 2749 DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); 2750 2751 // If the function does not have an initial map, we're done. 2752 Label done; 2753 __ CmpObjectType(result, MAP_TYPE, temp); 2754 __ j(not_equal, &done, Label::kNear); 2755 2756 // Get the prototype from the initial map. 2757 __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); 2758 2759 // All done. 2760 __ bind(&done); 2761 } 2762 2763 2764 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 2765 Register result = ToRegister(instr->result()); 2766 __ LoadRoot(result, instr->index()); 2767 } 2768 2769 2770 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 2771 Register arguments = ToRegister(instr->arguments()); 2772 Register result = ToRegister(instr->result()); 2773 if (instr->length()->IsConstantOperand() && 2774 instr->index()->IsConstantOperand()) { 2775 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 2776 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 2777 int index = (const_length - const_index) + 1; 2778 __ mov(result, Operand(arguments, index * kPointerSize)); 2779 } else { 2780 Register length = ToRegister(instr->length()); 2781 Operand index = ToOperand(instr->index()); 2782 // There are two words between the frame pointer and the last argument. 2783 // Subtracting from length accounts for one of them add one more. 
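    // That is, result = [arguments + ((length - index) + 1) * kPointerSize],
    // selecting the same slot as the constant-operand path above.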
2784 __ sub(length, index); 2785 __ mov(result, Operand(arguments, length, times_4, kPointerSize)); 2786 } 2787 } 2788 2789 2790 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 2791 ElementsKind elements_kind = instr->elements_kind(); 2792 LOperand* key = instr->key(); 2793 if (!key->IsConstantOperand() && 2794 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), 2795 elements_kind)) { 2796 __ SmiUntag(ToRegister(key)); 2797 } 2798 Operand operand(BuildFastArrayOperand( 2799 instr->elements(), 2800 key, 2801 instr->hydrogen()->key()->representation(), 2802 elements_kind, 2803 instr->base_offset())); 2804 if (elements_kind == FLOAT32_ELEMENTS) { 2805 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); 2806 } else if (elements_kind == FLOAT64_ELEMENTS) { 2807 X87Mov(ToX87Register(instr->result()), operand); 2808 } else { 2809 Register result(ToRegister(instr->result())); 2810 switch (elements_kind) { 2811 case INT8_ELEMENTS: 2812 __ movsx_b(result, operand); 2813 break; 2814 case UINT8_ELEMENTS: 2815 case UINT8_CLAMPED_ELEMENTS: 2816 __ movzx_b(result, operand); 2817 break; 2818 case INT16_ELEMENTS: 2819 __ movsx_w(result, operand); 2820 break; 2821 case UINT16_ELEMENTS: 2822 __ movzx_w(result, operand); 2823 break; 2824 case INT32_ELEMENTS: 2825 __ mov(result, operand); 2826 break; 2827 case UINT32_ELEMENTS: 2828 __ mov(result, operand); 2829 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 2830 __ test(result, Operand(result)); 2831 DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue); 2832 } 2833 break; 2834 case FLOAT32_ELEMENTS: 2835 case FLOAT64_ELEMENTS: 2836 case FAST_SMI_ELEMENTS: 2837 case FAST_ELEMENTS: 2838 case FAST_DOUBLE_ELEMENTS: 2839 case FAST_HOLEY_SMI_ELEMENTS: 2840 case FAST_HOLEY_ELEMENTS: 2841 case FAST_HOLEY_DOUBLE_ELEMENTS: 2842 case DICTIONARY_ELEMENTS: 2843 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 2844 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 2845 case FAST_STRING_WRAPPER_ELEMENTS: 2846 case SLOW_STRING_WRAPPER_ELEMENTS: 2847 case NO_ELEMENTS: 2848 UNREACHABLE(); 2849 break; 2850 } 2851 } 2852 } 2853 2854 2855 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 2856 if (instr->hydrogen()->RequiresHoleCheck()) { 2857 Operand hole_check_operand = BuildFastArrayOperand( 2858 instr->elements(), instr->key(), 2859 instr->hydrogen()->key()->representation(), 2860 FAST_DOUBLE_ELEMENTS, 2861 instr->base_offset() + sizeof(kHoleNanLower32)); 2862 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); 2863 DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); 2864 } 2865 2866 Operand double_load_operand = BuildFastArrayOperand( 2867 instr->elements(), 2868 instr->key(), 2869 instr->hydrogen()->key()->representation(), 2870 FAST_DOUBLE_ELEMENTS, 2871 instr->base_offset()); 2872 X87Mov(ToX87Register(instr->result()), double_load_operand); 2873 } 2874 2875 2876 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 2877 Register result = ToRegister(instr->result()); 2878 2879 // Load the result. 2880 __ mov(result, 2881 BuildFastArrayOperand(instr->elements(), instr->key(), 2882 instr->hydrogen()->key()->representation(), 2883 FAST_ELEMENTS, instr->base_offset())); 2884 2885 // Check for the hole value. 
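  // For fast smi element kinds the hole is the only non-smi value a load can
  // produce, so a smi-tag test suffices; other kinds compare against the hole
  // sentinel directly.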
2886 if (instr->hydrogen()->RequiresHoleCheck()) { 2887 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 2888 __ test(result, Immediate(kSmiTagMask)); 2889 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi); 2890 } else { 2891 __ cmp(result, factory()->the_hole_value()); 2892 DeoptimizeIf(equal, instr, DeoptimizeReason::kHole); 2893 } 2894 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 2895 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 2896 Label done; 2897 __ cmp(result, factory()->the_hole_value()); 2898 __ j(not_equal, &done); 2899 if (info()->IsStub()) { 2900 // A stub can safely convert the hole to undefined only if the array 2901 // protector cell contains (Smi) Isolate::kProtectorValid. 2902 // Otherwise it needs to bail out. 2903 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 2904 __ cmp(FieldOperand(result, PropertyCell::kValueOffset), 2905 Immediate(Smi::FromInt(Isolate::kProtectorValid))); 2906 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole); 2907 } 2908 __ mov(result, isolate()->factory()->undefined_value()); 2909 __ bind(&done); 2910 } 2911 } 2912 2913 2914 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 2915 if (instr->is_fixed_typed_array()) { 2916 DoLoadKeyedExternalArray(instr); 2917 } else if (instr->hydrogen()->representation().IsDouble()) { 2918 DoLoadKeyedFixedDoubleArray(instr); 2919 } else { 2920 DoLoadKeyedFixedArray(instr); 2921 } 2922 } 2923 2924 2925 Operand LCodeGen::BuildFastArrayOperand( 2926 LOperand* elements_pointer, 2927 LOperand* key, 2928 Representation key_representation, 2929 ElementsKind elements_kind, 2930 uint32_t base_offset) { 2931 Register elements_pointer_reg = ToRegister(elements_pointer); 2932 int element_shift_size = ElementsKindToShiftSize(elements_kind); 2933 int shift_size = element_shift_size; 2934 if (key->IsConstantOperand()) { 2935 int constant_value = ToInteger32(LConstantOperand::cast(key)); 2936 if (constant_value & 0xF0000000) { 2937 Abort(kArrayIndexConstantValueTooBig); 2938 } 2939 return Operand(elements_pointer_reg, 2940 ((constant_value) << shift_size) 2941 + base_offset); 2942 } else { 2943 // Take the tag bit into account while computing the shift size. 2944 if (key_representation.IsSmi() && (shift_size >= 1)) { 2945 shift_size -= kSmiTagSize; 2946 } 2947 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); 2948 return Operand(elements_pointer_reg, 2949 ToRegister(key), 2950 scale_factor, 2951 base_offset); 2952 } 2953 } 2954 2955 2956 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 2957 Register result = ToRegister(instr->result()); 2958 2959 if (instr->hydrogen()->from_inlined()) { 2960 __ lea(result, Operand(esp, -2 * kPointerSize)); 2961 } else if (instr->hydrogen()->arguments_adaptor()) { 2962 // Check for arguments adapter frame. 2963 Label done, adapted; 2964 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2965 __ mov(result, 2966 Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset)); 2967 __ cmp(Operand(result), 2968 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 2969 __ j(equal, &adapted, Label::kNear); 2970 2971 // No arguments adaptor frame. 2972 __ mov(result, Operand(ebp)); 2973 __ jmp(&done, Label::kNear); 2974 2975 // Arguments adaptor frame present. 
2976 __ bind(&adapted); 2977 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2978 2979 // Result is the frame pointer for the frame if not adapted and for the real 2980 // frame below the adaptor frame if adapted. 2981 __ bind(&done); 2982 } else { 2983 __ mov(result, Operand(ebp)); 2984 } 2985 } 2986 2987 2988 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 2989 Operand elem = ToOperand(instr->elements()); 2990 Register result = ToRegister(instr->result()); 2991 2992 Label done; 2993 2994 // If no arguments adaptor frame the number of arguments is fixed. 2995 __ cmp(ebp, elem); 2996 __ mov(result, Immediate(scope()->num_parameters())); 2997 __ j(equal, &done, Label::kNear); 2998 2999 // Arguments adaptor frame present. Get argument length from there. 3000 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 3001 __ mov(result, Operand(result, 3002 ArgumentsAdaptorFrameConstants::kLengthOffset)); 3003 __ SmiUntag(result); 3004 3005 // Argument length is in result register. 3006 __ bind(&done); 3007 } 3008 3009 3010 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3011 Register receiver = ToRegister(instr->receiver()); 3012 Register function = ToRegister(instr->function()); 3013 3014 // If the receiver is null or undefined, we have to pass the global 3015 // object as a receiver to normal functions. Values have to be 3016 // passed unchanged to builtins and strict-mode functions. 3017 Label receiver_ok, global_object; 3018 Label::Distance dist; 3019 3020 // For x87 debug version jitted code's size exceeds 128 bytes whether 3021 // FLAG_deopt_every_n_times 3022 // is set or not. Always use Label:kFar for label distance for debug mode. 3023 if (FLAG_debug_code) 3024 dist = Label::kFar; 3025 else 3026 dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; 3027 3028 Register scratch = ToRegister(instr->temp()); 3029 3030 if (!instr->hydrogen()->known_function()) { 3031 // Do not transform the receiver to object for strict mode 3032 // functions. 3033 __ mov(scratch, 3034 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3035 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset), 3036 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); 3037 __ j(not_equal, &receiver_ok, dist); 3038 3039 // Do not transform the receiver to object for builtins. 3040 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset), 3041 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); 3042 __ j(not_equal, &receiver_ok, dist); 3043 } 3044 3045 // Normal function. Replace undefined or null with global receiver. 3046 __ cmp(receiver, factory()->null_value()); 3047 __ j(equal, &global_object, dist); 3048 __ cmp(receiver, factory()->undefined_value()); 3049 __ j(equal, &global_object, dist); 3050 3051 // The receiver should be a JS object. 
3052 __ test(receiver, Immediate(kSmiTagMask)); 3053 DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi); 3054 __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch); 3055 DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject); 3056 3057 __ jmp(&receiver_ok, dist); 3058 __ bind(&global_object); 3059 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); 3060 __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX)); 3061 __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX)); 3062 __ bind(&receiver_ok); 3063 } 3064 3065 3066 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3067 Register receiver = ToRegister(instr->receiver()); 3068 Register function = ToRegister(instr->function()); 3069 Register length = ToRegister(instr->length()); 3070 Register elements = ToRegister(instr->elements()); 3071 DCHECK(receiver.is(eax)); // Used for parameter count. 3072 DCHECK(function.is(edi)); // Required by InvokeFunction. 3073 DCHECK(ToRegister(instr->result()).is(eax)); 3074 3075 // Copy the arguments to this function possibly from the 3076 // adaptor frame below it. 3077 const uint32_t kArgumentsLimit = 1 * KB; 3078 __ cmp(length, kArgumentsLimit); 3079 DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments); 3080 3081 __ push(receiver); 3082 __ mov(receiver, length); 3083 3084 // Loop through the arguments pushing them onto the execution 3085 // stack. 3086 Label invoke, loop; 3087 // length is a small non-negative integer, due to the test above. 3088 __ test(length, Operand(length)); 3089 __ j(zero, &invoke, Label::kNear); 3090 __ bind(&loop); 3091 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); 3092 __ dec(length); 3093 __ j(not_zero, &loop); 3094 3095 // Invoke the function. 3096 __ bind(&invoke); 3097 3098 InvokeFlag flag = CALL_FUNCTION; 3099 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { 3100 DCHECK(!info()->saves_caller_doubles()); 3101 // TODO(ishell): drop current frame before pushing arguments to the stack. 3102 flag = JUMP_FUNCTION; 3103 ParameterCount actual(eax); 3104 // It is safe to use ebx, ecx and edx as scratch registers here given that 3105 // 1) we are not going to return to caller function anyway, 3106 // 2) ebx (expected arguments count) and edx (new.target) will be 3107 // initialized below. 3108 PrepareForTailCall(actual, ebx, ecx, edx); 3109 } 3110 3111 DCHECK(instr->HasPointerMap()); 3112 LPointerMap* pointers = instr->pointer_map(); 3113 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); 3114 ParameterCount actual(eax); 3115 __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); 3116 } 3117 3118 3119 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 3120 __ int3(); 3121 } 3122 3123 3124 void LCodeGen::DoPushArgument(LPushArgument* instr) { 3125 LOperand* argument = instr->value(); 3126 EmitPushTaggedOperand(argument); 3127 } 3128 3129 3130 void LCodeGen::DoDrop(LDrop* instr) { 3131 __ Drop(instr->count()); 3132 } 3133 3134 3135 void LCodeGen::DoThisFunction(LThisFunction* instr) { 3136 Register result = ToRegister(instr->result()); 3137 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); 3138 } 3139 3140 3141 void LCodeGen::DoContext(LContext* instr) { 3142 Register result = ToRegister(instr->result()); 3143 if (info()->IsOptimizing()) { 3144 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); 3145 } else { 3146 // If there is no frame, the context must be in esi. 
3147 DCHECK(result.is(esi)); 3148 } 3149 } 3150 3151 3152 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3153 DCHECK(ToRegister(instr->context()).is(esi)); 3154 __ push(Immediate(instr->hydrogen()->declarations())); 3155 __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); 3156 __ push(Immediate(instr->hydrogen()->feedback_vector())); 3157 CallRuntime(Runtime::kDeclareGlobals, instr); 3158 } 3159 3160 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3161 int formal_parameter_count, int arity, 3162 bool is_tail_call, LInstruction* instr) { 3163 bool dont_adapt_arguments = 3164 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3165 bool can_invoke_directly = 3166 dont_adapt_arguments || formal_parameter_count == arity; 3167 3168 Register function_reg = edi; 3169 3170 if (can_invoke_directly) { 3171 // Change context. 3172 __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset)); 3173 3174 // Always initialize new target and number of actual arguments. 3175 __ mov(edx, factory()->undefined_value()); 3176 __ mov(eax, arity); 3177 3178 bool is_self_call = function.is_identical_to(info()->closure()); 3179 3180 // Invoke function directly. 3181 if (is_self_call) { 3182 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location())); 3183 if (is_tail_call) { 3184 __ Jump(self, RelocInfo::CODE_TARGET); 3185 } else { 3186 __ Call(self, RelocInfo::CODE_TARGET); 3187 } 3188 } else { 3189 Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset); 3190 if (is_tail_call) { 3191 __ jmp(target); 3192 } else { 3193 __ call(target); 3194 } 3195 } 3196 3197 if (!is_tail_call) { 3198 // Set up deoptimization. 3199 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3200 } 3201 } else { 3202 // We need to adapt arguments. 3203 LPointerMap* pointers = instr->pointer_map(); 3204 SafepointGenerator generator( 3205 this, pointers, Safepoint::kLazyDeopt); 3206 ParameterCount actual(arity); 3207 ParameterCount expected(formal_parameter_count); 3208 InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; 3209 __ InvokeFunction(function_reg, expected, actual, flag, generator); 3210 } 3211 } 3212 3213 3214 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 3215 DCHECK(ToRegister(instr->result()).is(eax)); 3216 3217 if (instr->hydrogen()->IsTailCall()) { 3218 if (NeedsEagerFrame()) __ leave(); 3219 3220 if (instr->target()->IsConstantOperand()) { 3221 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3222 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3223 __ jmp(code, RelocInfo::CODE_TARGET); 3224 } else { 3225 DCHECK(instr->target()->IsRegister()); 3226 Register target = ToRegister(instr->target()); 3227 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); 3228 __ jmp(target); 3229 } 3230 } else { 3231 LPointerMap* pointers = instr->pointer_map(); 3232 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3233 3234 if (instr->target()->IsConstantOperand()) { 3235 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3236 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3237 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 3238 __ call(code, RelocInfo::CODE_TARGET); 3239 } else { 3240 DCHECK(instr->target()->IsRegister()); 3241 Register target = ToRegister(instr->target()); 3242 generator.BeforeCall(__ CallSize(Operand(target))); 3243 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); 3244 __ call(target); 3245 } 3246 generator.AfterCall(); 3247 } 3248 } 3249 3250 3251 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3252 Register input_reg = ToRegister(instr->value()); 3253 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 3254 factory()->heap_number_map()); 3255 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); 3256 3257 Label slow, allocated, done; 3258 uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit(); 3259 available_regs &= ~input_reg.bit(); 3260 if (instr->context()->IsRegister()) { 3261 // Make sure that the context isn't overwritten in the AllocateHeapNumber 3262 // macro below. 3263 available_regs &= ~ToRegister(instr->context()).bit(); 3264 } 3265 3266 Register tmp = 3267 Register::from_code(base::bits::CountTrailingZeros32(available_regs)); 3268 available_regs &= ~tmp.bit(); 3269 Register tmp2 = 3270 Register::from_code(base::bits::CountTrailingZeros32(available_regs)); 3271 3272 // Preserve the value of all registers. 3273 PushSafepointRegistersScope scope(this); 3274 3275 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 3276 // Check the sign of the argument. If the argument is positive, just 3277 // return it. We do not need to patch the stack since |input| and 3278 // |result| are the same register and |input| will be restored 3279 // unchanged by popping safepoint registers. 3280 __ test(tmp, Immediate(HeapNumber::kSignMask)); 3281 __ j(zero, &done, Label::kNear); 3282 3283 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); 3284 __ jmp(&allocated, Label::kNear); 3285 3286 // Slow case: Call the runtime system to do the number allocation. 3287 __ bind(&slow); 3288 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, 3289 instr, instr->context()); 3290 // Set the pointer to the new heap number in tmp. 3291 if (!tmp.is(eax)) __ mov(tmp, eax); 3292 // Restore input_reg after call to runtime. 
3293 __ LoadFromSafepointRegisterSlot(input_reg, input_reg); 3294 3295 __ bind(&allocated); 3296 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 3297 __ and_(tmp2, ~HeapNumber::kSignMask); 3298 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); 3299 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); 3300 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); 3301 __ StoreToSafepointRegisterSlot(input_reg, tmp); 3302 3303 __ bind(&done); 3304 } 3305 3306 3307 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3308 Register input_reg = ToRegister(instr->value()); 3309 __ test(input_reg, Operand(input_reg)); 3310 Label is_positive; 3311 __ j(not_sign, &is_positive, Label::kNear); 3312 __ neg(input_reg); // Sets flags. 3313 DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow); 3314 __ bind(&is_positive); 3315 } 3316 3317 3318 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3319 // Class for deferred case. 3320 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { 3321 public: 3322 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 3323 LMathAbs* instr, 3324 const X87Stack& x87_stack) 3325 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 3326 void Generate() override { 3327 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3328 } 3329 LInstruction* instr() override { return instr_; } 3330 3331 private: 3332 LMathAbs* instr_; 3333 }; 3334 3335 DCHECK(instr->value()->Equals(instr->result())); 3336 Representation r = instr->hydrogen()->value()->representation(); 3337 3338 if (r.IsDouble()) { 3339 X87Register value = ToX87Register(instr->value()); 3340 X87Fxch(value); 3341 __ fabs(); 3342 } else if (r.IsSmiOrInteger32()) { 3343 EmitIntegerMathAbs(instr); 3344 } else { // Tagged case. 3345 DeferredMathAbsTaggedHeapNumber* deferred = 3346 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); 3347 Register input_reg = ToRegister(instr->value()); 3348 // Smi check. 3349 __ JumpIfNotSmi(input_reg, deferred->entry()); 3350 EmitIntegerMathAbs(instr); 3351 __ bind(deferred->exit()); 3352 } 3353 } 3354 3355 3356 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3357 Register output_reg = ToRegister(instr->result()); 3358 X87Register input_reg = ToX87Register(instr->value()); 3359 X87Fxch(input_reg); 3360 3361 Label not_minus_zero, done; 3362 // Deoptimize on unordered. 3363 __ fldz(); 3364 __ fld(1); 3365 __ FCmp(); 3366 DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); 3367 __ j(below, ¬_minus_zero, Label::kNear); 3368 3369 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3370 // Check for negative zero. 3371 __ j(not_equal, ¬_minus_zero, Label::kNear); 3372 // +- 0.0. 3373 __ fld(0); 3374 __ FXamSign(); 3375 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); 3376 __ Move(output_reg, Immediate(0)); 3377 __ jmp(&done, Label::kFar); 3378 } 3379 3380 // Positive input. 3381 // rc=01B, round down. 
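  // X87SetRC(0x0400) sets the RC field (bits 10-11) of the FPU control word
  // to 01B, i.e. round toward -infinity, so the fist_s below stores
  // floor(input).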
3382 __ bind(¬_minus_zero); 3383 __ fnclex(); 3384 __ X87SetRC(0x0400); 3385 __ sub(esp, Immediate(kPointerSize)); 3386 __ fist_s(Operand(esp, 0)); 3387 __ pop(output_reg); 3388 __ X87SetRC(0x0000); 3389 __ X87CheckIA(); 3390 DeoptimizeIf(equal, instr, DeoptimizeReason::kOverflow); 3391 __ fnclex(); 3392 __ X87SetRC(0x0000); 3393 __ bind(&done); 3394 } 3395 3396 3397 void LCodeGen::DoMathRound(LMathRound* instr) { 3398 X87Register input_reg = ToX87Register(instr->value()); 3399 Register result = ToRegister(instr->result()); 3400 X87Fxch(input_reg); 3401 Label below_one_half, below_minus_one_half, done; 3402 3403 ExternalReference one_half = ExternalReference::address_of_one_half(); 3404 ExternalReference minus_one_half = 3405 ExternalReference::address_of_minus_one_half(); 3406 3407 __ fld_d(Operand::StaticVariable(one_half)); 3408 __ fld(1); 3409 __ FCmp(); 3410 __ j(carry, &below_one_half); 3411 3412 // Use rounds towards zero, since 0.5 <= x, we use floor(0.5 + x) 3413 __ fld(0); 3414 __ fadd_d(Operand::StaticVariable(one_half)); 3415 // rc=11B, round toward zero. 3416 __ X87SetRC(0x0c00); 3417 __ sub(esp, Immediate(kPointerSize)); 3418 // Clear exception bits. 3419 __ fnclex(); 3420 __ fistp_s(MemOperand(esp, 0)); 3421 // Restore round mode. 3422 __ X87SetRC(0x0000); 3423 // Check overflow. 3424 __ X87CheckIA(); 3425 __ pop(result); 3426 DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow); 3427 __ fnclex(); 3428 // Restore round mode. 3429 __ X87SetRC(0x0000); 3430 __ jmp(&done); 3431 3432 __ bind(&below_one_half); 3433 __ fld_d(Operand::StaticVariable(minus_one_half)); 3434 __ fld(1); 3435 __ FCmp(); 3436 __ j(carry, &below_minus_one_half); 3437 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if 3438 // we can ignore the difference between a result of -0 and +0. 3439 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3440 // If the sign is positive, we return +0. 3441 __ fld(0); 3442 __ FXamSign(); 3443 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); 3444 } 3445 __ Move(result, Immediate(0)); 3446 __ jmp(&done); 3447 3448 __ bind(&below_minus_one_half); 3449 __ fld(0); 3450 __ fadd_d(Operand::StaticVariable(one_half)); 3451 // rc=01B, round down. 3452 __ X87SetRC(0x0400); 3453 __ sub(esp, Immediate(kPointerSize)); 3454 // Clear exception bits. 3455 __ fnclex(); 3456 __ fistp_s(MemOperand(esp, 0)); 3457 // Restore round mode. 3458 __ X87SetRC(0x0000); 3459 // Check overflow. 3460 __ X87CheckIA(); 3461 __ pop(result); 3462 DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow); 3463 __ fnclex(); 3464 // Restore round mode. 
3465 __ X87SetRC(0x0000); 3466 3467 __ bind(&done); 3468 } 3469 3470 3471 void LCodeGen::DoMathFround(LMathFround* instr) { 3472 X87Register input_reg = ToX87Register(instr->value()); 3473 X87Fxch(input_reg); 3474 __ sub(esp, Immediate(kPointerSize)); 3475 __ fstp_s(MemOperand(esp, 0)); 3476 X87Fld(MemOperand(esp, 0), kX87FloatOperand); 3477 __ add(esp, Immediate(kPointerSize)); 3478 } 3479 3480 3481 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3482 X87Register input_reg = ToX87Register(instr->value()); 3483 __ X87SetFPUCW(0x027F); 3484 X87Fxch(input_reg); 3485 __ fsqrt(); 3486 __ X87SetFPUCW(0x037F); 3487 } 3488 3489 3490 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3491 X87Register input_reg = ToX87Register(instr->value()); 3492 DCHECK(ToX87Register(instr->result()).is(input_reg)); 3493 X87Fxch(input_reg); 3494 // Note that according to ECMA-262 15.8.2.13: 3495 // Math.pow(-Infinity, 0.5) == Infinity 3496 // Math.sqrt(-Infinity) == NaN 3497 Label done, sqrt; 3498 // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1 3499 __ fxam(); 3500 __ push(eax); 3501 __ fnstsw_ax(); 3502 __ and_(eax, Immediate(0x4700)); 3503 __ cmp(eax, Immediate(0x0700)); 3504 __ j(not_equal, &sqrt, Label::kNear); 3505 // If input is -Infinity, return Infinity. 3506 __ fchs(); 3507 __ jmp(&done, Label::kNear); 3508 3509 // Square root. 3510 __ bind(&sqrt); 3511 __ fldz(); 3512 __ faddp(); // Convert -0 to +0. 3513 __ fsqrt(); 3514 __ bind(&done); 3515 __ pop(eax); 3516 } 3517 3518 3519 void LCodeGen::DoPower(LPower* instr) { 3520 Representation exponent_type = instr->hydrogen()->right()->representation(); 3521 X87Register result = ToX87Register(instr->result()); 3522 // Having marked this as a call, we can use any registers. 3523 X87Register base = ToX87Register(instr->left()); 3524 ExternalReference one_half = ExternalReference::address_of_one_half(); 3525 3526 if (exponent_type.IsSmi()) { 3527 Register exponent = ToRegister(instr->right()); 3528 X87LoadForUsage(base); 3529 __ SmiUntag(exponent); 3530 __ push(exponent); 3531 __ fild_s(MemOperand(esp, 0)); 3532 __ pop(exponent); 3533 } else if (exponent_type.IsTagged()) { 3534 Register exponent = ToRegister(instr->right()); 3535 Register temp = exponent.is(ecx) ? eax : ecx; 3536 Label no_deopt, done; 3537 X87LoadForUsage(base); 3538 __ JumpIfSmi(exponent, &no_deopt); 3539 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp); 3540 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); 3541 // Heap number(double) 3542 __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset)); 3543 __ jmp(&done); 3544 // SMI 3545 __ bind(&no_deopt); 3546 __ SmiUntag(exponent); 3547 __ push(exponent); 3548 __ fild_s(MemOperand(esp, 0)); 3549 __ pop(exponent); 3550 __ bind(&done); 3551 } else if (exponent_type.IsInteger32()) { 3552 Register exponent = ToRegister(instr->right()); 3553 X87LoadForUsage(base); 3554 __ push(exponent); 3555 __ fild_s(MemOperand(esp, 0)); 3556 __ pop(exponent); 3557 } else { 3558 DCHECK(exponent_type.IsDouble()); 3559 X87Register exponent_double = ToX87Register(instr->right()); 3560 X87LoadForUsage(base, exponent_double); 3561 } 3562 3563 // FP data stack {base, exponent(TOS)}. 3564 // Handle (exponent==+-0.5 && base == -0). 3565 Label not_plus_0; 3566 __ fld(0); 3567 __ fabs(); 3568 X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand); 3569 __ FCmp(); 3570 __ j(parity_even, ¬_plus_0, Label::kNear); // NaN. 3571 __ j(not_equal, ¬_plus_0, Label::kNear); 3572 __ fldz(); 3573 // FP data stack {base, exponent(TOS), zero}. 
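  // faddp(2) adds the zero into the base (st(2) += st(0), then pops), turning
  // a -0 base into +0 before the power function is called.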
3574 __ faddp(2);
3575 __ bind(&not_plus_0);
3576
3577 {
3578 __ PrepareCallCFunction(4, eax);
3579 __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value.
3580 __ fstp_d(MemOperand(esp, 0)); // Base value.
3581 X87PrepareToWrite(result);
3582 __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
3583 4);
3584 // Return value is in st(0) on ia32.
3585 X87CommitWrite(result);
3586 }
3587 }
3588
3589
3590 void LCodeGen::DoMathLog(LMathLog* instr) {
3591 DCHECK(instr->value()->Equals(instr->result()));
3592 X87Register result = ToX87Register(instr->result());
3593 X87Register input_reg = ToX87Register(instr->value());
3594 X87Fxch(input_reg);
3595
3596 // Pass one double as argument on the stack.
3597 __ PrepareCallCFunction(2, eax);
3598 __ fstp_d(MemOperand(esp, 0));
3599 X87PrepareToWrite(result);
3600 __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
3601 // Return value is in st(0) on ia32.
3602 X87CommitWrite(result);
3603 }
3604
3605
3606 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3607 Register input = ToRegister(instr->value());
3608 Register result = ToRegister(instr->result());
3609
3610 __ Lzcnt(result, input);
3611 }
3612
3613 void LCodeGen::DoMathCos(LMathCos* instr) {
3614 X87Register result = ToX87Register(instr->result());
3615 X87Register input_reg = ToX87Register(instr->value());
3616 __ fld(x87_stack_.st(input_reg));
3617
3618 // Pass one double as argument on the stack.
3619 __ PrepareCallCFunction(2, eax);
3620 __ fstp_d(MemOperand(esp, 0));
3621 X87PrepareToWrite(result);
3622 __ X87SetFPUCW(0x027F);
3623 __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
3624 __ X87SetFPUCW(0x037F);
3625 // Return value is in st(0) on ia32.
3626 X87CommitWrite(result);
3627 }
3628
3629 void LCodeGen::DoMathSin(LMathSin* instr) {
3630 X87Register result = ToX87Register(instr->result());
3631 X87Register input_reg = ToX87Register(instr->value());
3632 __ fld(x87_stack_.st(input_reg));
3633
3634 // Pass one double as argument on the stack.
3635 __ PrepareCallCFunction(2, eax);
3636 __ fstp_d(MemOperand(esp, 0));
3637 X87PrepareToWrite(result);
3638 __ X87SetFPUCW(0x027F);
3639 __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
3640 __ X87SetFPUCW(0x037F);
3641 // Return value is in st(0) on ia32.
3642 X87CommitWrite(result);
3643 }
3644
3645 void LCodeGen::DoMathExp(LMathExp* instr) {
3646 X87Register result = ToX87Register(instr->result());
3647 X87Register input_reg = ToX87Register(instr->value());
3648 __ fld(x87_stack_.st(input_reg));
3649
3650 // Pass one double as argument on the stack.
3651 __ PrepareCallCFunction(2, eax);
3652 __ fstp_d(MemOperand(esp, 0));
3653 X87PrepareToWrite(result);
3654 __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
3655 // Return value is in st(0) on ia32.
3656 X87CommitWrite(result); 3657 } 3658 3659 void LCodeGen::PrepareForTailCall(const ParameterCount& actual, 3660 Register scratch1, Register scratch2, 3661 Register scratch3) { 3662 #if DEBUG 3663 if (actual.is_reg()) { 3664 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); 3665 } else { 3666 DCHECK(!AreAliased(scratch1, scratch2, scratch3)); 3667 } 3668 #endif 3669 if (FLAG_code_comments) { 3670 if (actual.is_reg()) { 3671 Comment(";;; PrepareForTailCall, actual: %s {", 3672 RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( 3673 actual.reg().code())); 3674 } else { 3675 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); 3676 } 3677 } 3678 3679 // Check if next frame is an arguments adaptor frame. 3680 Register caller_args_count_reg = scratch1; 3681 Label no_arguments_adaptor, formal_parameter_count_loaded; 3682 __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 3683 __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset), 3684 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3685 __ j(not_equal, &no_arguments_adaptor, Label::kNear); 3686 3687 // Drop current frame and load arguments count from arguments adaptor frame. 3688 __ mov(ebp, scratch2); 3689 __ mov(caller_args_count_reg, 3690 Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3691 __ SmiUntag(caller_args_count_reg); 3692 __ jmp(&formal_parameter_count_loaded, Label::kNear); 3693 3694 __ bind(&no_arguments_adaptor); 3695 // Load caller's formal parameter count. 3696 __ mov(caller_args_count_reg, 3697 Immediate(info()->literal()->parameter_count())); 3698 3699 __ bind(&formal_parameter_count_loaded); 3700 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3, 3701 ReturnAddressState::kNotOnStack, 0); 3702 Comment(";;; }"); 3703 } 3704 3705 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 3706 HInvokeFunction* hinstr = instr->hydrogen(); 3707 DCHECK(ToRegister(instr->context()).is(esi)); 3708 DCHECK(ToRegister(instr->function()).is(edi)); 3709 DCHECK(instr->HasPointerMap()); 3710 3711 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; 3712 3713 if (is_tail_call) { 3714 DCHECK(!info()->saves_caller_doubles()); 3715 ParameterCount actual(instr->arity()); 3716 // It is safe to use ebx, ecx and edx as scratch registers here given that 3717 // 1) we are not going to return to caller function anyway, 3718 // 2) ebx (expected arguments count) and edx (new.target) will be 3719 // initialized below. 3720 PrepareForTailCall(actual, ebx, ecx, edx); 3721 } 3722 3723 Handle<JSFunction> known_function = hinstr->known_function(); 3724 if (known_function.is_null()) { 3725 LPointerMap* pointers = instr->pointer_map(); 3726 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3727 ParameterCount actual(instr->arity()); 3728 InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; 3729 __ InvokeFunction(edi, no_reg, actual, flag, generator); 3730 } else { 3731 CallKnownFunction(known_function, hinstr->formal_parameter_count(), 3732 instr->arity(), is_tail_call, instr); 3733 } 3734 } 3735 3736 3737 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 3738 DCHECK(ToRegister(instr->context()).is(esi)); 3739 DCHECK(ToRegister(instr->constructor()).is(edi)); 3740 DCHECK(ToRegister(instr->result()).is(eax)); 3741 3742 __ Move(eax, Immediate(instr->arity())); 3743 __ mov(ebx, instr->hydrogen()->site()); 3744 3745 ElementsKind kind = instr->hydrogen()->elements_kind(); 3746 AllocationSiteOverrideMode override_mode = 3747 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 3748 ? DISABLE_ALLOCATION_SITES 3749 : DONT_OVERRIDE; 3750 3751 if (instr->arity() == 0) { 3752 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); 3753 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3754 } else if (instr->arity() == 1) { 3755 Label done; 3756 if (IsFastPackedElementsKind(kind)) { 3757 Label packed_case; 3758 // We might need a change here 3759 // look at the first argument 3760 __ mov(ecx, Operand(esp, 0)); 3761 __ test(ecx, ecx); 3762 __ j(zero, &packed_case, Label::kNear); 3763 3764 ElementsKind holey_kind = GetHoleyElementsKind(kind); 3765 ArraySingleArgumentConstructorStub stub(isolate(), 3766 holey_kind, 3767 override_mode); 3768 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3769 __ jmp(&done, Label::kNear); 3770 __ bind(&packed_case); 3771 } 3772 3773 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 3774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3775 __ bind(&done); 3776 } else { 3777 ArrayNArgumentsConstructorStub stub(isolate()); 3778 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3779 } 3780 } 3781 3782 3783 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 3784 DCHECK(ToRegister(instr->context()).is(esi)); 3785 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); 3786 } 3787 3788 3789 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 3790 Register function = ToRegister(instr->function()); 3791 Register code_object = ToRegister(instr->code_object()); 3792 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); 3793 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); 3794 } 3795 3796 3797 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 3798 Register result = ToRegister(instr->result()); 3799 Register base = ToRegister(instr->base_object()); 3800 if (instr->offset()->IsConstantOperand()) { 3801 LConstantOperand* offset = LConstantOperand::cast(instr->offset()); 3802 __ lea(result, Operand(base, ToInteger32(offset))); 3803 } else { 3804 Register offset = ToRegister(instr->offset()); 3805 __ lea(result, Operand(base, offset, times_1, 0)); 3806 } 3807 } 3808 3809 3810 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 3811 Representation representation = instr->hydrogen()->field_representation(); 3812 3813 HObjectAccess access = instr->hydrogen()->access(); 3814 int offset = access.offset(); 3815 3816 if (access.IsExternalMemory()) { 3817 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 3818 MemOperand operand = instr->object()->IsConstantOperand() 3819 ? 
MemOperand::StaticVariable( 3820 ToExternalReference(LConstantOperand::cast(instr->object()))) 3821 : MemOperand(ToRegister(instr->object()), offset); 3822 if (instr->value()->IsConstantOperand()) { 3823 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); 3824 __ mov(operand, Immediate(ToInteger32(operand_value))); 3825 } else { 3826 Register value = ToRegister(instr->value()); 3827 __ Store(value, operand, representation); 3828 } 3829 return; 3830 } 3831 3832 Register object = ToRegister(instr->object()); 3833 __ AssertNotSmi(object); 3834 DCHECK(!representation.IsSmi() || 3835 !instr->value()->IsConstantOperand() || 3836 IsSmi(LConstantOperand::cast(instr->value()))); 3837 if (representation.IsDouble()) { 3838 DCHECK(access.IsInobject()); 3839 DCHECK(!instr->hydrogen()->has_transition()); 3840 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 3841 X87Register value = ToX87Register(instr->value()); 3842 X87Mov(FieldOperand(object, offset), value); 3843 return; 3844 } 3845 3846 if (instr->hydrogen()->has_transition()) { 3847 Handle<Map> transition = instr->hydrogen()->transition_map(); 3848 AddDeprecationDependency(transition); 3849 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); 3850 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { 3851 Register temp = ToRegister(instr->temp()); 3852 Register temp_map = ToRegister(instr->temp_map()); 3853 __ mov(temp_map, transition); 3854 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); 3855 // Update the write barrier for the map field. 3856 __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs); 3857 } 3858 } 3859 3860 // Do the store. 3861 Register write_register = object; 3862 if (!access.IsInobject()) { 3863 write_register = ToRegister(instr->temp()); 3864 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); 3865 } 3866 3867 MemOperand operand = FieldOperand(write_register, offset); 3868 if (instr->value()->IsConstantOperand()) { 3869 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); 3870 if (operand_value->IsRegister()) { 3871 Register value = ToRegister(operand_value); 3872 __ Store(value, operand, representation); 3873 } else if (representation.IsInteger32() || representation.IsExternal()) { 3874 Immediate immediate = ToImmediate(operand_value, representation); 3875 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 3876 __ mov(operand, immediate); 3877 } else { 3878 Handle<Object> handle_value = ToHandle(operand_value); 3879 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 3880 __ mov(operand, handle_value); 3881 } 3882 } else { 3883 Register value = ToRegister(instr->value()); 3884 __ Store(value, operand, representation); 3885 } 3886 3887 if (instr->hydrogen()->NeedsWriteBarrier()) { 3888 Register value = ToRegister(instr->value()); 3889 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; 3890 // Update the write barrier for the object for in-object properties. 3891 __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs, 3892 EMIT_REMEMBERED_SET, 3893 instr->hydrogen()->SmiCheckForWriteBarrier(), 3894 instr->hydrogen()->PointersToHereCheckForValue()); 3895 } 3896 } 3897 3898 3899 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 3900 Condition cc = instr->hydrogen()->allow_equality() ? 
above : above_equal; 3901 if (instr->index()->IsConstantOperand()) { 3902 __ cmp(ToOperand(instr->length()), 3903 ToImmediate(LConstantOperand::cast(instr->index()), 3904 instr->hydrogen()->length()->representation())); 3905 cc = CommuteCondition(cc); 3906 } else if (instr->length()->IsConstantOperand()) { 3907 __ cmp(ToOperand(instr->index()), 3908 ToImmediate(LConstantOperand::cast(instr->length()), 3909 instr->hydrogen()->index()->representation())); 3910 } else { 3911 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); 3912 } 3913 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 3914 Label done; 3915 __ j(NegateCondition(cc), &done, Label::kNear); 3916 __ int3(); 3917 __ bind(&done); 3918 } else { 3919 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); 3920 } 3921 } 3922 3923 3924 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 3925 ElementsKind elements_kind = instr->elements_kind(); 3926 LOperand* key = instr->key(); 3927 if (!key->IsConstantOperand() && 3928 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), 3929 elements_kind)) { 3930 __ SmiUntag(ToRegister(key)); 3931 } 3932 Operand operand(BuildFastArrayOperand( 3933 instr->elements(), 3934 key, 3935 instr->hydrogen()->key()->representation(), 3936 elements_kind, 3937 instr->base_offset())); 3938 if (elements_kind == FLOAT32_ELEMENTS) { 3939 X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand); 3940 } else if (elements_kind == FLOAT64_ELEMENTS) { 3941 uint64_t int_val = kHoleNanInt64; 3942 int32_t lower = static_cast<int32_t>(int_val); 3943 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 3944 Operand operand2 = BuildFastArrayOperand( 3945 instr->elements(), instr->key(), 3946 instr->hydrogen()->key()->representation(), elements_kind, 3947 instr->base_offset() + kPointerSize); 3948 3949 Label no_special_nan_handling, done; 3950 X87Register value = ToX87Register(instr->value()); 3951 X87Fxch(value); 3952 __ lea(esp, Operand(esp, -kDoubleSize)); 3953 __ fst_d(MemOperand(esp, 0)); 3954 __ lea(esp, Operand(esp, kDoubleSize)); 3955 int offset = sizeof(kHoleNanUpper32); 3956 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); 3957 __ j(not_equal, &no_special_nan_handling, Label::kNear); 3958 __ mov(operand, Immediate(lower)); 3959 __ mov(operand2, Immediate(upper)); 3960 __ jmp(&done, Label::kNear); 3961 3962 __ bind(&no_special_nan_handling); 3963 __ fst_d(operand); 3964 __ bind(&done); 3965 } else { 3966 Register value = ToRegister(instr->value()); 3967 switch (elements_kind) { 3968 case UINT8_ELEMENTS: 3969 case INT8_ELEMENTS: 3970 case UINT8_CLAMPED_ELEMENTS: 3971 __ mov_b(operand, value); 3972 break; 3973 case UINT16_ELEMENTS: 3974 case INT16_ELEMENTS: 3975 __ mov_w(operand, value); 3976 break; 3977 case UINT32_ELEMENTS: 3978 case INT32_ELEMENTS: 3979 __ mov(operand, value); 3980 break; 3981 case FLOAT32_ELEMENTS: 3982 case FLOAT64_ELEMENTS: 3983 case FAST_SMI_ELEMENTS: 3984 case FAST_ELEMENTS: 3985 case FAST_DOUBLE_ELEMENTS: 3986 case FAST_HOLEY_SMI_ELEMENTS: 3987 case FAST_HOLEY_ELEMENTS: 3988 case FAST_HOLEY_DOUBLE_ELEMENTS: 3989 case DICTIONARY_ELEMENTS: 3990 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 3991 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 3992 case FAST_STRING_WRAPPER_ELEMENTS: 3993 case SLOW_STRING_WRAPPER_ELEMENTS: 3994 case NO_ELEMENTS: 3995 UNREACHABLE(); 3996 break; 3997 } 3998 } 3999 } 4000 4001 4002 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4003 Operand double_store_operand = BuildFastArrayOperand( 
4004 instr->elements(), 4005 instr->key(), 4006 instr->hydrogen()->key()->representation(), 4007 FAST_DOUBLE_ELEMENTS, 4008 instr->base_offset()); 4009 4010 uint64_t int_val = kHoleNanInt64; 4011 int32_t lower = static_cast<int32_t>(int_val); 4012 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 4013 Operand double_store_operand2 = BuildFastArrayOperand( 4014 instr->elements(), instr->key(), 4015 instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, 4016 instr->base_offset() + kPointerSize); 4017 4018 if (instr->hydrogen()->IsConstantHoleStore()) { 4019 // This means we should store the (double) hole. No floating point 4020 // registers required. 4021 __ mov(double_store_operand, Immediate(lower)); 4022 __ mov(double_store_operand2, Immediate(upper)); 4023 } else { 4024 Label no_special_nan_handling, done; 4025 X87Register value = ToX87Register(instr->value()); 4026 X87Fxch(value); 4027 4028 if (instr->NeedsCanonicalization()) { 4029 __ fld(0); 4030 __ fld(0); 4031 __ FCmp(); 4032 __ j(parity_odd, &no_special_nan_handling, Label::kNear); 4033 // All NaNs are Canonicalized to 0x7fffffffffffffff 4034 __ mov(double_store_operand, Immediate(0xffffffff)); 4035 __ mov(double_store_operand2, Immediate(0x7fffffff)); 4036 __ jmp(&done, Label::kNear); 4037 } else { 4038 __ lea(esp, Operand(esp, -kDoubleSize)); 4039 __ fst_d(MemOperand(esp, 0)); 4040 __ lea(esp, Operand(esp, kDoubleSize)); 4041 int offset = sizeof(kHoleNanUpper32); 4042 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); 4043 __ j(not_equal, &no_special_nan_handling, Label::kNear); 4044 __ mov(double_store_operand, Immediate(lower)); 4045 __ mov(double_store_operand2, Immediate(upper)); 4046 __ jmp(&done, Label::kNear); 4047 } 4048 __ bind(&no_special_nan_handling); 4049 __ fst_d(double_store_operand); 4050 __ bind(&done); 4051 } 4052 } 4053 4054 4055 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4056 Register elements = ToRegister(instr->elements()); 4057 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; 4058 4059 Operand operand = BuildFastArrayOperand( 4060 instr->elements(), 4061 instr->key(), 4062 instr->hydrogen()->key()->representation(), 4063 FAST_ELEMENTS, 4064 instr->base_offset()); 4065 if (instr->value()->IsRegister()) { 4066 __ mov(operand, ToRegister(instr->value())); 4067 } else { 4068 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); 4069 if (IsSmi(operand_value)) { 4070 Immediate immediate = ToImmediate(operand_value, Representation::Smi()); 4071 __ mov(operand, immediate); 4072 } else { 4073 DCHECK(!IsInteger32(operand_value)); 4074 Handle<Object> handle_value = ToHandle(operand_value); 4075 __ mov(operand, handle_value); 4076 } 4077 } 4078 4079 if (instr->hydrogen()->NeedsWriteBarrier()) { 4080 DCHECK(instr->value()->IsRegister()); 4081 Register value = ToRegister(instr->value()); 4082 DCHECK(!instr->key()->IsConstantOperand()); 4083 SmiCheck check_needed = 4084 instr->hydrogen()->value()->type().IsHeapObject() 4085 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4086 // Compute address of modified element and store it into key register. 
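// The key register is reused to hold the address of the modified slot, since
// RecordWrite below expects the slot address rather than a (base, index) pair.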
4087 __ lea(key, operand); 4088 __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET, 4089 check_needed, 4090 instr->hydrogen()->PointersToHereCheckForValue()); 4091 } 4092 } 4093 4094 4095 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4096 // By cases...external, fast-double, fast 4097 if (instr->is_fixed_typed_array()) { 4098 DoStoreKeyedExternalArray(instr); 4099 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4100 DoStoreKeyedFixedDoubleArray(instr); 4101 } else { 4102 DoStoreKeyedFixedArray(instr); 4103 } 4104 } 4105 4106 4107 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4108 Register object = ToRegister(instr->object()); 4109 Register temp = ToRegister(instr->temp()); 4110 Label no_memento_found; 4111 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); 4112 DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound); 4113 __ bind(&no_memento_found); 4114 } 4115 4116 4117 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { 4118 class DeferredMaybeGrowElements final : public LDeferredCode { 4119 public: 4120 DeferredMaybeGrowElements(LCodeGen* codegen, 4121 LMaybeGrowElements* instr, 4122 const X87Stack& x87_stack) 4123 : LDeferredCode(codegen, x87_stack), instr_(instr) {} 4124 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } 4125 LInstruction* instr() override { return instr_; } 4126 4127 private: 4128 LMaybeGrowElements* instr_; 4129 }; 4130 4131 Register result = eax; 4132 DeferredMaybeGrowElements* deferred = 4133 new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_); 4134 LOperand* key = instr->key(); 4135 LOperand* current_capacity = instr->current_capacity(); 4136 4137 DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); 4138 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); 4139 DCHECK(key->IsConstantOperand() || key->IsRegister()); 4140 DCHECK(current_capacity->IsConstantOperand() || 4141 current_capacity->IsRegister()); 4142 4143 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { 4144 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 4145 int32_t constant_capacity = 4146 ToInteger32(LConstantOperand::cast(current_capacity)); 4147 if (constant_key >= constant_capacity) { 4148 // Deferred case. 4149 __ jmp(deferred->entry()); 4150 } 4151 } else if (key->IsConstantOperand()) { 4152 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 4153 __ cmp(ToOperand(current_capacity), Immediate(constant_key)); 4154 __ j(less_equal, deferred->entry()); 4155 } else if (current_capacity->IsConstantOperand()) { 4156 int32_t constant_capacity = 4157 ToInteger32(LConstantOperand::cast(current_capacity)); 4158 __ cmp(ToRegister(key), Immediate(constant_capacity)); 4159 __ j(greater_equal, deferred->entry()); 4160 } else { 4161 __ cmp(ToRegister(key), ToRegister(current_capacity)); 4162 __ j(greater_equal, deferred->entry()); 4163 } 4164 4165 __ mov(result, ToOperand(instr->elements())); 4166 __ bind(deferred->exit()); 4167 } 4168 4169 4170 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { 4171 // TODO(3095996): Get rid of this. For now, we need to make the 4172 // result register contain a valid pointer because it is already 4173 // contained in the register pointer map. 4174 Register result = eax; 4175 __ Move(result, Immediate(0)); 4176 4177 // We have to call a stub. 
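// The block below passes the object in eax (the result register) and the key,
// tagged as a smi, in ebx to GrowArrayElementsStub inside a
// PushSafepointRegistersScope, so every register stays visible to the GC
// across the call.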
4178 {
4179 PushSafepointRegistersScope scope(this);
4180 if (instr->object()->IsRegister()) {
4181 __ Move(result, ToRegister(instr->object()));
4182 } else {
4183 __ mov(result, ToOperand(instr->object()));
4184 }
4185
4186 LOperand* key = instr->key();
4187 if (key->IsConstantOperand()) {
4188 LConstantOperand* constant_key = LConstantOperand::cast(key);
4189 int32_t int_key = ToInteger32(constant_key);
4190 if (Smi::IsValid(int_key)) {
4191 __ mov(ebx, Immediate(Smi::FromInt(int_key)));
4192 } else {
4193 // We should never get here at runtime because there is a smi check on
4194 // the key before this point.
4195 __ int3();
4196 }
4197 } else {
4198 __ Move(ebx, ToRegister(key));
4199 __ SmiTag(ebx);
4200 }
4201
4202 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
4203 __ CallStub(&stub);
4204 RecordSafepointWithLazyDeopt(
4205 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4206 __ StoreToSafepointRegisterSlot(result, result);
4207 }
4208
4209 // Deopt on smi, which means the elements array changed to dictionary mode.
4210 __ test(result, Immediate(kSmiTagMask));
4211 DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
4212 }
4213
4214
4215 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4216 Register object_reg = ToRegister(instr->object());
4217
4218 Handle<Map> from_map = instr->original_map();
4219 Handle<Map> to_map = instr->transitioned_map();
4220 ElementsKind from_kind = instr->from_kind();
4221 ElementsKind to_kind = instr->to_kind();
4222
4223 Label not_applicable;
4224 bool is_simple_map_transition =
4225 IsSimpleMapChangeTransition(from_kind, to_kind);
4226 Label::Distance branch_distance =
4227 is_simple_map_transition ? Label::kNear : Label::kFar;
4228 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4229 __ j(not_equal, &not_applicable, branch_distance);
4230 if (is_simple_map_transition) {
4231 Register new_map_reg = ToRegister(instr->new_map_temp());
4232 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4233 Immediate(to_map));
4234 // Write barrier.
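// A simple transition only rewrites the map word, so RecordWriteForMap is
// sufficient; non-simple transitions go through TransitionElementsKindStub
// below, which may have to allocate a new backing store.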
4235 DCHECK_NOT_NULL(instr->temp());
4236 __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4237 ToRegister(instr->temp()), kDontSaveFPRegs);
4238 } else {
4239 DCHECK(ToRegister(instr->context()).is(esi));
4240 DCHECK(object_reg.is(eax));
4241 PushSafepointRegistersScope scope(this);
4242 __ mov(ebx, to_map);
4243 TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4244 __ CallStub(&stub);
4245 RecordSafepointWithLazyDeopt(instr,
4246 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4247 }
4248 __ bind(&not_applicable);
4249 }
4250
4251
4252 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4253 class DeferredStringCharCodeAt final : public LDeferredCode {
4254 public:
4255 DeferredStringCharCodeAt(LCodeGen* codegen,
4256 LStringCharCodeAt* instr,
4257 const X87Stack& x87_stack)
4258 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4259 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4260 LInstruction* instr() override { return instr_; }
4261
4262 private:
4263 LStringCharCodeAt* instr_;
4264 };
4265
4266 DeferredStringCharCodeAt* deferred =
4267 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4268
4269 StringCharLoadGenerator::Generate(masm(),
4270 factory(),
4271 ToRegister(instr->string()),
4272 ToRegister(instr->index()),
4273 ToRegister(instr->result()),
4274 deferred->entry());
4275 __ bind(deferred->exit());
4276 }
4277
4278
4279 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4280 Register string = ToRegister(instr->string());
4281 Register result = ToRegister(instr->result());
4282
4283 // TODO(3095996): Get rid of this. For now, we need to make the
4284 // result register contain a valid pointer because it is already
4285 // contained in the register pointer map.
4286 __ Move(result, Immediate(0));
4287
4288 PushSafepointRegistersScope scope(this);
4289 __ push(string);
4290 // Push the index as a smi. This is safe because of the checks in
4291 // DoStringCharCodeAt above.
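// Since String::kMaxLength <= Smi::kMaxValue, any in-bounds index fits in a
// smi and the SmiTag below cannot overflow.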
4292 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); 4293 if (instr->index()->IsConstantOperand()) { 4294 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), 4295 Representation::Smi()); 4296 __ push(immediate); 4297 } else { 4298 Register index = ToRegister(instr->index()); 4299 __ SmiTag(index); 4300 __ push(index); 4301 } 4302 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, 4303 instr, instr->context()); 4304 __ AssertSmi(eax); 4305 __ SmiUntag(eax); 4306 __ StoreToSafepointRegisterSlot(result, eax); 4307 } 4308 4309 4310 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4311 class DeferredStringCharFromCode final : public LDeferredCode { 4312 public: 4313 DeferredStringCharFromCode(LCodeGen* codegen, 4314 LStringCharFromCode* instr, 4315 const X87Stack& x87_stack) 4316 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 4317 void Generate() override { 4318 codegen()->DoDeferredStringCharFromCode(instr_); 4319 } 4320 LInstruction* instr() override { return instr_; } 4321 4322 private: 4323 LStringCharFromCode* instr_; 4324 }; 4325 4326 DeferredStringCharFromCode* deferred = 4327 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); 4328 4329 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); 4330 Register char_code = ToRegister(instr->char_code()); 4331 Register result = ToRegister(instr->result()); 4332 DCHECK(!char_code.is(result)); 4333 4334 __ cmp(char_code, String::kMaxOneByteCharCode); 4335 __ j(above, deferred->entry()); 4336 __ Move(result, Immediate(factory()->single_character_string_cache())); 4337 __ mov(result, FieldOperand(result, 4338 char_code, times_pointer_size, 4339 FixedArray::kHeaderSize)); 4340 __ cmp(result, factory()->undefined_value()); 4341 __ j(equal, deferred->entry()); 4342 __ bind(deferred->exit()); 4343 } 4344 4345 4346 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4347 Register char_code = ToRegister(instr->char_code()); 4348 Register result = ToRegister(instr->result()); 4349 4350 // TODO(3095996): Get rid of this. For now, we need to make the 4351 // result register contain a valid pointer because it is already 4352 // contained in the register pointer map. 
4353 __ Move(result, Immediate(0)); 4354 4355 PushSafepointRegistersScope scope(this); 4356 __ SmiTag(char_code); 4357 __ push(char_code); 4358 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, 4359 instr->context()); 4360 __ StoreToSafepointRegisterSlot(result, eax); 4361 } 4362 4363 4364 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4365 DCHECK(ToRegister(instr->context()).is(esi)); 4366 DCHECK(ToRegister(instr->left()).is(edx)); 4367 DCHECK(ToRegister(instr->right()).is(eax)); 4368 StringAddStub stub(isolate(), 4369 instr->hydrogen()->flags(), 4370 instr->hydrogen()->pretenure_flag()); 4371 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4372 } 4373 4374 4375 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4376 LOperand* input = instr->value(); 4377 LOperand* output = instr->result(); 4378 DCHECK(input->IsRegister() || input->IsStackSlot()); 4379 DCHECK(output->IsDoubleRegister()); 4380 if (input->IsRegister()) { 4381 Register input_reg = ToRegister(input); 4382 __ push(input_reg); 4383 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); 4384 __ pop(input_reg); 4385 } else { 4386 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); 4387 } 4388 } 4389 4390 4391 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4392 LOperand* input = instr->value(); 4393 LOperand* output = instr->result(); 4394 X87Register res = ToX87Register(output); 4395 X87PrepareToWrite(res); 4396 __ LoadUint32NoSSE2(ToRegister(input)); 4397 X87CommitWrite(res); 4398 } 4399 4400 4401 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4402 class DeferredNumberTagI final : public LDeferredCode { 4403 public: 4404 DeferredNumberTagI(LCodeGen* codegen, 4405 LNumberTagI* instr, 4406 const X87Stack& x87_stack) 4407 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 4408 void Generate() override { 4409 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), 4410 SIGNED_INT32); 4411 } 4412 LInstruction* instr() override { return instr_; } 4413 4414 private: 4415 LNumberTagI* instr_; 4416 }; 4417 4418 LOperand* input = instr->value(); 4419 DCHECK(input->IsRegister() && input->Equals(instr->result())); 4420 Register reg = ToRegister(input); 4421 4422 DeferredNumberTagI* deferred = 4423 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); 4424 __ SmiTag(reg); 4425 __ j(overflow, deferred->entry()); 4426 __ bind(deferred->exit()); 4427 } 4428 4429 4430 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4431 class DeferredNumberTagU final : public LDeferredCode { 4432 public: 4433 DeferredNumberTagU(LCodeGen* codegen, 4434 LNumberTagU* instr, 4435 const X87Stack& x87_stack) 4436 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 4437 void Generate() override { 4438 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), 4439 UNSIGNED_INT32); 4440 } 4441 LInstruction* instr() override { return instr_; } 4442 4443 private: 4444 LNumberTagU* instr_; 4445 }; 4446 4447 LOperand* input = instr->value(); 4448 DCHECK(input->IsRegister() && input->Equals(instr->result())); 4449 Register reg = ToRegister(input); 4450 4451 DeferredNumberTagU* deferred = 4452 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); 4453 __ cmp(reg, Immediate(Smi::kMaxValue)); 4454 __ j(above, deferred->entry()); 4455 __ SmiTag(reg); 4456 __ bind(deferred->exit()); 4457 } 4458 4459 4460 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, 4461 LOperand* value, 4462 LOperand* temp, 4463 IntegerSignedness signedness) { 4464 Label done, 
slow; 4465 Register reg = ToRegister(value); 4466 Register tmp = ToRegister(temp); 4467 4468 if (signedness == SIGNED_INT32) { 4469 // There was overflow, so bits 30 and 31 of the original integer 4470 // disagree. Try to allocate a heap number in new space and store 4471 // the value in there. If that fails, call the runtime system. 4472 __ SmiUntag(reg); 4473 __ xor_(reg, 0x80000000); 4474 __ push(reg); 4475 __ fild_s(Operand(esp, 0)); 4476 __ pop(reg); 4477 } else { 4478 // There's no fild variant for unsigned values, so zero-extend to a 64-bit 4479 // int manually. 4480 __ push(Immediate(0)); 4481 __ push(reg); 4482 __ fild_d(Operand(esp, 0)); 4483 __ pop(reg); 4484 __ pop(reg); 4485 } 4486 4487 if (FLAG_inline_new) { 4488 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 4489 __ jmp(&done, Label::kNear); 4490 } 4491 4492 // Slow case: Call the runtime system to do the number allocation. 4493 __ bind(&slow); 4494 { 4495 // TODO(3095996): Put a valid pointer value in the stack slot where the 4496 // result register is stored, as this register is in the pointer map, but 4497 // contains an integer value. 4498 __ Move(reg, Immediate(0)); 4499 4500 // Preserve the value of all registers. 4501 PushSafepointRegistersScope scope(this); 4502 // Reset the context register. 4503 if (!reg.is(esi)) { 4504 __ Move(esi, Immediate(0)); 4505 } 4506 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4507 RecordSafepointWithRegisters( 4508 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4509 __ StoreToSafepointRegisterSlot(reg, eax); 4510 } 4511 4512 __ bind(&done); 4513 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); 4514 } 4515 4516 4517 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4518 class DeferredNumberTagD final : public LDeferredCode { 4519 public: 4520 DeferredNumberTagD(LCodeGen* codegen, 4521 LNumberTagD* instr, 4522 const X87Stack& x87_stack) 4523 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 4524 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } 4525 LInstruction* instr() override { return instr_; } 4526 4527 private: 4528 LNumberTagD* instr_; 4529 }; 4530 4531 Register reg = ToRegister(instr->result()); 4532 4533 // Put the value to the top of stack 4534 X87Register src = ToX87Register(instr->value()); 4535 // Don't use X87LoadForUsage here, which is only used by Instruction which 4536 // clobbers fp registers. 4537 x87_stack_.Fxch(src); 4538 4539 DeferredNumberTagD* deferred = 4540 new(zone()) DeferredNumberTagD(this, instr, x87_stack_); 4541 if (FLAG_inline_new) { 4542 Register tmp = ToRegister(instr->temp()); 4543 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); 4544 } else { 4545 __ jmp(deferred->entry()); 4546 } 4547 __ bind(deferred->exit()); 4548 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); 4549 } 4550 4551 4552 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4553 // TODO(3095996): Get rid of this. For now, we need to make the 4554 // result register contain a valid pointer because it is already 4555 // contained in the register pointer map. 4556 Register reg = ToRegister(instr->result()); 4557 __ Move(reg, Immediate(0)); 4558 4559 PushSafepointRegistersScope scope(this); 4560 // Reset the context register. 
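// esi is the context register and is recorded in the safepoint; clearing it
// (unless it already holds the zeroed result) presumably keeps the GC from
// interpreting a stale value as a heap pointer during the runtime call.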
4561 if (!reg.is(esi)) { 4562 __ Move(esi, Immediate(0)); 4563 } 4564 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4565 RecordSafepointWithRegisters( 4566 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4567 __ StoreToSafepointRegisterSlot(reg, eax); 4568 } 4569 4570 4571 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4572 HChange* hchange = instr->hydrogen(); 4573 Register input = ToRegister(instr->value()); 4574 if (hchange->CheckFlag(HValue::kCanOverflow) && 4575 hchange->value()->CheckFlag(HValue::kUint32)) { 4576 __ test(input, Immediate(0xc0000000)); 4577 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow); 4578 } 4579 __ SmiTag(input); 4580 if (hchange->CheckFlag(HValue::kCanOverflow) && 4581 !hchange->value()->CheckFlag(HValue::kUint32)) { 4582 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 4583 } 4584 } 4585 4586 4587 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4588 LOperand* input = instr->value(); 4589 Register result = ToRegister(input); 4590 DCHECK(input->IsRegister() && input->Equals(instr->result())); 4591 if (instr->needs_check()) { 4592 __ test(result, Immediate(kSmiTagMask)); 4593 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi); 4594 } else { 4595 __ AssertSmi(result); 4596 } 4597 __ SmiUntag(result); 4598 } 4599 4600 4601 void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg, 4602 Register temp_reg, X87Register res_reg, 4603 NumberUntagDMode mode) { 4604 bool can_convert_undefined_to_nan = instr->truncating(); 4605 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 4606 4607 Label load_smi, done; 4608 4609 X87PrepareToWrite(res_reg); 4610 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4611 // Smi check. 4612 __ JumpIfSmi(input_reg, &load_smi); 4613 4614 // Heap number map check. 4615 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 4616 factory()->heap_number_map()); 4617 if (!can_convert_undefined_to_nan) { 4618 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber); 4619 } else { 4620 Label heap_number, convert; 4621 __ j(equal, &heap_number); 4622 4623 // Convert undefined (or hole) to NaN. 4624 __ cmp(input_reg, factory()->undefined_value()); 4625 DeoptimizeIf(not_equal, instr, 4626 DeoptimizeReason::kNotAHeapNumberUndefined); 4627 4628 __ bind(&convert); 4629 __ push(Immediate(0xfff80000)); 4630 __ push(Immediate(0x00000000)); 4631 __ fld_d(MemOperand(esp, 0)); 4632 __ lea(esp, Operand(esp, kDoubleSize)); 4633 __ jmp(&done, Label::kNear); 4634 4635 __ bind(&heap_number); 4636 } 4637 // Heap number to x87 conversion. 4638 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); 4639 if (deoptimize_on_minus_zero) { 4640 __ fldz(); 4641 __ FCmp(); 4642 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); 4643 __ j(not_zero, &done, Label::kNear); 4644 4645 // Use general purpose registers to check if we have -0.0 4646 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 4647 __ test(temp_reg, Immediate(HeapNumber::kSignMask)); 4648 __ j(zero, &done, Label::kNear); 4649 4650 // Pop FPU stack before deoptimizing. 4651 __ fstp(0); 4652 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); 4653 } 4654 __ jmp(&done, Label::kNear); 4655 } else { 4656 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4657 } 4658 4659 __ bind(&load_smi); 4660 // Clobbering a temp is faster than re-tagging the 4661 // input register since we avoid dependencies. 
4662 __ mov(temp_reg, input_reg);
4663 __ SmiUntag(temp_reg); // Untag smi before converting to float.
4664 __ push(temp_reg);
4665 __ fild_s(Operand(esp, 0));
4666 __ add(esp, Immediate(kPointerSize));
4667 __ bind(&done);
4668 X87CommitWrite(res_reg);
4669 }
4670
4671
4672 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4673 Register input_reg = ToRegister(instr->value());
4674
4675 // The input was optimistically untagged; revert it.
4676 STATIC_ASSERT(kSmiTagSize == 1);
4677 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
4678
4679 if (instr->truncating()) {
4680 Label truncate;
4681 Label::Distance truncate_distance =
4682 DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4683 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4684 factory()->heap_number_map());
4685 __ j(equal, &truncate, truncate_distance);
4686 __ push(input_reg);
4687 __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
4688 __ pop(input_reg);
4689 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
4690 __ bind(&truncate);
4691 __ TruncateHeapNumberToI(input_reg, input_reg);
4692 } else {
4693 // TODO(olivf) Converting a number on the fpu is actually quite slow. We
4694 // should first try a fast conversion and then bailout to this slow case.
4695 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4696 isolate()->factory()->heap_number_map());
4697 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
4698
4699 __ sub(esp, Immediate(kPointerSize));
4700 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4701
4702 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
4703 Label no_precision_lost, not_nan, zero_check;
4704 __ fld(0);
4705
4706 __ fist_s(MemOperand(esp, 0));
4707 __ fild_s(MemOperand(esp, 0));
4708 __ FCmp();
4709 __ pop(input_reg);
4710
4711 __ j(equal, &no_precision_lost, Label::kNear);
4712 __ fstp(0);
4713 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
4714 __ bind(&no_precision_lost);
4715
4716 __ j(parity_odd, &not_nan);
4717 __ fstp(0);
4718 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
4719 __ bind(&not_nan);
4720
4721 __ test(input_reg, Operand(input_reg));
4722 __ j(zero, &zero_check, Label::kNear);
4723 __ fstp(0);
4724 __ jmp(done);
4725
4726 __ bind(&zero_check);
4727 // To check for minus zero, we load the value again as float, and check
4728 // if that is still 0.
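// +0.0 stores as the 32-bit pattern 0x00000000 and -0.0 as 0x80000000, so a
// non-zero word after the fstp_s below means the input was -0.0.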
4729 __ sub(esp, Immediate(kPointerSize)); 4730 __ fstp_s(Operand(esp, 0)); 4731 __ pop(input_reg); 4732 __ test(input_reg, Operand(input_reg)); 4733 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero); 4734 } else { 4735 __ fist_s(MemOperand(esp, 0)); 4736 __ fild_s(MemOperand(esp, 0)); 4737 __ FCmp(); 4738 __ pop(input_reg); 4739 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision); 4740 DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN); 4741 } 4742 } 4743 } 4744 4745 4746 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4747 class DeferredTaggedToI final : public LDeferredCode { 4748 public: 4749 DeferredTaggedToI(LCodeGen* codegen, 4750 LTaggedToI* instr, 4751 const X87Stack& x87_stack) 4752 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 4753 void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); } 4754 LInstruction* instr() override { return instr_; } 4755 4756 private: 4757 LTaggedToI* instr_; 4758 }; 4759 4760 LOperand* input = instr->value(); 4761 DCHECK(input->IsRegister()); 4762 Register input_reg = ToRegister(input); 4763 DCHECK(input_reg.is(ToRegister(instr->result()))); 4764 4765 if (instr->hydrogen()->value()->representation().IsSmi()) { 4766 __ SmiUntag(input_reg); 4767 } else { 4768 DeferredTaggedToI* deferred = 4769 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); 4770 // Optimistically untag the input. 4771 // If the input is a HeapObject, SmiUntag will set the carry flag. 4772 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 4773 __ SmiUntag(input_reg); 4774 // Branch to deferred code if the input was tagged. 4775 // The deferred code will take care of restoring the tag. 4776 __ j(carry, deferred->entry()); 4777 __ bind(deferred->exit()); 4778 } 4779 } 4780 4781 4782 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4783 LOperand* input = instr->value(); 4784 DCHECK(input->IsRegister()); 4785 LOperand* temp = instr->temp(); 4786 DCHECK(temp->IsRegister()); 4787 LOperand* result = instr->result(); 4788 DCHECK(result->IsDoubleRegister()); 4789 4790 Register input_reg = ToRegister(input); 4791 Register temp_reg = ToRegister(temp); 4792 4793 HValue* value = instr->hydrogen()->value(); 4794 NumberUntagDMode mode = value->representation().IsSmi() 4795 ? 
NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 4796 4797 EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result), 4798 mode); 4799 } 4800 4801 4802 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 4803 LOperand* input = instr->value(); 4804 DCHECK(input->IsDoubleRegister()); 4805 LOperand* result = instr->result(); 4806 DCHECK(result->IsRegister()); 4807 Register result_reg = ToRegister(result); 4808 4809 if (instr->truncating()) { 4810 X87Register input_reg = ToX87Register(input); 4811 X87Fxch(input_reg); 4812 __ TruncateX87TOSToI(result_reg); 4813 } else { 4814 Label lost_precision, is_nan, minus_zero, done; 4815 X87Register input_reg = ToX87Register(input); 4816 X87Fxch(input_reg); 4817 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), 4818 &lost_precision, &is_nan, &minus_zero); 4819 __ jmp(&done); 4820 __ bind(&lost_precision); 4821 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); 4822 __ bind(&is_nan); 4823 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); 4824 __ bind(&minus_zero); 4825 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); 4826 __ bind(&done); 4827 } 4828 } 4829 4830 4831 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 4832 LOperand* input = instr->value(); 4833 DCHECK(input->IsDoubleRegister()); 4834 LOperand* result = instr->result(); 4835 DCHECK(result->IsRegister()); 4836 Register result_reg = ToRegister(result); 4837 4838 Label lost_precision, is_nan, minus_zero, done; 4839 X87Register input_reg = ToX87Register(input); 4840 X87Fxch(input_reg); 4841 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), 4842 &lost_precision, &is_nan, &minus_zero); 4843 __ jmp(&done); 4844 __ bind(&lost_precision); 4845 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision); 4846 __ bind(&is_nan); 4847 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN); 4848 __ bind(&minus_zero); 4849 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero); 4850 __ bind(&done); 4851 __ SmiTag(result_reg); 4852 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow); 4853 } 4854 4855 4856 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 4857 LOperand* input = instr->value(); 4858 __ test(ToOperand(input), Immediate(kSmiTagMask)); 4859 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi); 4860 } 4861 4862 4863 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 4864 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4865 LOperand* input = instr->value(); 4866 __ test(ToOperand(input), Immediate(kSmiTagMask)); 4867 DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi); 4868 } 4869 } 4870 4871 4872 void LCodeGen::DoCheckArrayBufferNotNeutered( 4873 LCheckArrayBufferNotNeutered* instr) { 4874 Register view = ToRegister(instr->view()); 4875 Register scratch = ToRegister(instr->scratch()); 4876 4877 __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset)); 4878 __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset), 4879 Immediate(1 << JSArrayBuffer::WasNeutered::kShift)); 4880 DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds); 4881 } 4882 4883 4884 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 4885 Register input = ToRegister(instr->value()); 4886 Register temp = ToRegister(instr->temp()); 4887 4888 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); 4889 4890 if (instr->hydrogen()->is_interval_check()) { 4891 InstanceType first; 4892 InstanceType last; 4893 instr->hydrogen()->GetCheckInterval(&first, &last); 4894 
4895 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first)); 4896 4897 // If there is only one type in the interval check for equality. 4898 if (first == last) { 4899 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType); 4900 } else { 4901 DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType); 4902 // Omit check for the last type. 4903 if (last != LAST_TYPE) { 4904 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last)); 4905 DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType); 4906 } 4907 } 4908 } else { 4909 uint8_t mask; 4910 uint8_t tag; 4911 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 4912 4913 if (base::bits::IsPowerOfTwo32(mask)) { 4914 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 4915 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask)); 4916 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, 4917 DeoptimizeReason::kWrongInstanceType); 4918 } else { 4919 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); 4920 __ and_(temp, mask); 4921 __ cmp(temp, tag); 4922 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType); 4923 } 4924 } 4925 } 4926 4927 4928 void LCodeGen::DoCheckValue(LCheckValue* instr) { 4929 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 4930 if (instr->hydrogen()->object_in_new_space()) { 4931 Register reg = ToRegister(instr->value()); 4932 Handle<Cell> cell = isolate()->factory()->NewCell(object); 4933 __ cmp(reg, Operand::ForCell(cell)); 4934 } else { 4935 Operand operand = ToOperand(instr->value()); 4936 __ cmp(operand, object); 4937 } 4938 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch); 4939 } 4940 4941 4942 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 4943 Label deopt, done; 4944 // If the map is not deprecated the migration attempt does not make sense. 
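// The Deprecated bit in the map's bit_field3 is tested below: only objects
// whose map has been deprecated can be migrated by Runtime::kTryMigrateInstance;
// everything else deopts immediately.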
4945 __ push(object); 4946 __ mov(object, FieldOperand(object, HeapObject::kMapOffset)); 4947 __ test(FieldOperand(object, Map::kBitField3Offset), 4948 Immediate(Map::Deprecated::kMask)); 4949 __ pop(object); 4950 __ j(zero, &deopt); 4951 4952 { 4953 PushSafepointRegistersScope scope(this); 4954 __ push(object); 4955 __ xor_(esi, esi); 4956 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 4957 RecordSafepointWithRegisters( 4958 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 4959 4960 __ test(eax, Immediate(kSmiTagMask)); 4961 } 4962 __ j(not_zero, &done); 4963 4964 __ bind(&deopt); 4965 DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed); 4966 4967 __ bind(&done); 4968 } 4969 4970 4971 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 4972 class DeferredCheckMaps final : public LDeferredCode { 4973 public: 4974 DeferredCheckMaps(LCodeGen* codegen, 4975 LCheckMaps* instr, 4976 Register object, 4977 const X87Stack& x87_stack) 4978 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { 4979 SetExit(check_maps()); 4980 } 4981 void Generate() override { 4982 codegen()->DoDeferredInstanceMigration(instr_, object_); 4983 } 4984 Label* check_maps() { return &check_maps_; } 4985 LInstruction* instr() override { return instr_; } 4986 4987 private: 4988 LCheckMaps* instr_; 4989 Label check_maps_; 4990 Register object_; 4991 }; 4992 4993 if (instr->hydrogen()->IsStabilityCheck()) { 4994 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 4995 for (int i = 0; i < maps->size(); ++i) { 4996 AddStabilityDependency(maps->at(i).handle()); 4997 } 4998 return; 4999 } 5000 5001 LOperand* input = instr->value(); 5002 DCHECK(input->IsRegister()); 5003 Register reg = ToRegister(input); 5004 5005 DeferredCheckMaps* deferred = NULL; 5006 if (instr->hydrogen()->HasMigrationTarget()) { 5007 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); 5008 __ bind(deferred->check_maps()); 5009 } 5010 5011 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5012 Label success; 5013 for (int i = 0; i < maps->size() - 1; i++) { 5014 Handle<Map> map = maps->at(i).handle(); 5015 __ CompareMap(reg, map); 5016 __ j(equal, &success, Label::kNear); 5017 } 5018 5019 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5020 __ CompareMap(reg, map); 5021 if (instr->hydrogen()->HasMigrationTarget()) { 5022 __ j(not_equal, deferred->entry()); 5023 } else { 5024 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap); 5025 } 5026 5027 __ bind(&success); 5028 } 5029 5030 5031 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5032 X87Register value_reg = ToX87Register(instr->unclamped()); 5033 Register result_reg = ToRegister(instr->result()); 5034 X87Fxch(value_reg); 5035 __ ClampTOSToUint8(result_reg); 5036 } 5037 5038 5039 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5040 DCHECK(instr->unclamped()->Equals(instr->result())); 5041 Register value_reg = ToRegister(instr->result()); 5042 __ ClampUint8(value_reg); 5043 } 5044 5045 5046 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { 5047 Register input_reg = ToRegister(instr->unclamped()); 5048 Register result_reg = ToRegister(instr->result()); 5049 Register scratch = ToRegister(instr->scratch()); 5050 Register scratch2 = ToRegister(instr->scratch2()); 5051 Register scratch3 = ToRegister(instr->scratch3()); 5052 Label is_smi, done, heap_number, valid_exponent, 5053 largest_value, zero_result, maybe_nan_or_infinity; 5054 5055 __ JumpIfSmi(input_reg, &is_smi); 5056 5057 // Check 
for heap number
5058 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5059 factory()->heap_number_map());
5060 __ j(equal, &heap_number, Label::kNear);
5061
5062 // Check for undefined. Undefined is converted to zero for clamping
5063 // conversions.
5064 __ cmp(input_reg, factory()->undefined_value());
5065 DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
5066 __ jmp(&zero_result, Label::kNear);
5067
5068 // Heap number
5069 __ bind(&heap_number);
5070
5071 // Surprisingly, all of the hand-crafted bit-manipulations below are much
5072 // faster than the x86 FPU built-in instruction, especially since "banker's
5073 // rounding" would additionally be very expensive.
5074
5075 // Get exponent word.
5076 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5077 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5078
5079 // Test for negative values --> clamp to zero.
5080 __ test(scratch, scratch);
5081 __ j(negative, &zero_result, Label::kNear);
5082
5083 // Get exponent alone in scratch2.
5084 __ mov(scratch2, scratch);
5085 __ and_(scratch2, HeapNumber::kExponentMask);
5086 __ shr(scratch2, HeapNumber::kExponentShift);
5087 __ j(zero, &zero_result, Label::kNear);
5088 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5089 __ j(negative, &zero_result, Label::kNear);
5090
5091 const uint32_t non_int8_exponent = 7;
5092 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5093 // If the exponent is too big, check for special values.
5094 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5095
5096 __ bind(&valid_exponent);
5097 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5098 // < 7. The shift bias is the number of bits to shift the mantissa such that,
5099 // with an exponent of 7, the top-most one is in bit 30, allowing detection
5100 // of the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to
5101 // 1).
5102 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5103 __ lea(result_reg, MemOperand(scratch2, shift_bias));
5104 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5105 // top bits of the mantissa.
5106 __ and_(scratch, HeapNumber::kMantissaMask);
5107 // Put back the implicit 1 of the mantissa.
5108 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5109 // Shift up to round.
5110 __ shl_cl(scratch);
5111 // Use "banker's rounding" per spec: if the fractional part of the number is
5112 // 0.5, then add the bit in the "ones" place to the "halves" place, which has
5113 // the effect of rounding to even.
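// For example, with round-half-to-even 2.5 clamps to 2 and 3.5 clamps to 4
// (ties go to the even neighbour), while 2.501 still rounds up to 3.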
5114 __ mov(scratch2, scratch); 5115 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; 5116 const uint32_t one_bit_shift = one_half_bit_shift + 1; 5117 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); 5118 __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); 5119 Label no_round; 5120 __ j(less, &no_round, Label::kNear); 5121 Label round_up; 5122 __ mov(scratch2, Immediate(1 << one_half_bit_shift)); 5123 __ j(greater, &round_up, Label::kNear); 5124 __ test(scratch3, scratch3); 5125 __ j(not_zero, &round_up, Label::kNear); 5126 __ mov(scratch2, scratch); 5127 __ and_(scratch2, Immediate(1 << one_bit_shift)); 5128 __ shr(scratch2, 1); 5129 __ bind(&round_up); 5130 __ add(scratch, scratch2); 5131 __ j(overflow, &largest_value, Label::kNear); 5132 __ bind(&no_round); 5133 __ shr(scratch, 23); 5134 __ mov(result_reg, scratch); 5135 __ jmp(&done, Label::kNear); 5136 5137 __ bind(&maybe_nan_or_infinity); 5138 // Check for NaN/Infinity, all other values map to 255 5139 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); 5140 __ j(not_equal, &largest_value, Label::kNear); 5141 5142 // Check for NaN, which differs from Infinity in that at least one mantissa 5143 // bit is set. 5144 __ and_(scratch, HeapNumber::kMantissaMask); 5145 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); 5146 __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN 5147 // Infinity -> Fall through to map to 255. 5148 5149 __ bind(&largest_value); 5150 __ mov(result_reg, Immediate(255)); 5151 __ jmp(&done, Label::kNear); 5152 5153 __ bind(&zero_result); 5154 __ xor_(result_reg, result_reg); 5155 __ jmp(&done, Label::kNear); 5156 5157 // smi 5158 __ bind(&is_smi); 5159 if (!input_reg.is(result_reg)) { 5160 __ mov(result_reg, input_reg); 5161 } 5162 __ SmiUntag(result_reg); 5163 __ ClampUint8(result_reg); 5164 __ bind(&done); 5165 } 5166 5167 5168 void LCodeGen::DoAllocate(LAllocate* instr) { 5169 class DeferredAllocate final : public LDeferredCode { 5170 public: 5171 DeferredAllocate(LCodeGen* codegen, 5172 LAllocate* instr, 5173 const X87Stack& x87_stack) 5174 : LDeferredCode(codegen, x87_stack), instr_(instr) { } 5175 void Generate() override { codegen()->DoDeferredAllocate(instr_); } 5176 LInstruction* instr() override { return instr_; } 5177 5178 private: 5179 LAllocate* instr_; 5180 }; 5181 5182 DeferredAllocate* deferred = 5183 new(zone()) DeferredAllocate(this, instr, x87_stack_); 5184 5185 Register result = ToRegister(instr->result()); 5186 Register temp = ToRegister(instr->temp()); 5187 5188 // Allocate memory for the object. 
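// The inline fast path below bumps the allocation top pointer; if that fails,
// control transfers to deferred->entry(), which allocates through the runtime
// (see DoDeferredAllocate).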
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
           isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, temp, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, temp, flags);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
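  // A Smi is a safe placeholder here: the GC treats Smis as immediates, not
  // as heap pointers, so it will not try to relocate the value.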
  __ Move(result, Immediate(Smi::kZero));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort.
      __ int3();
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
    }
    // If the allocation-folding dominator allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    __ sub(eax, Immediate(kHeapObjectTag));
    __ mov(Operand::StaticVariable(allocation_top), eax);
    __ add(eax, Immediate(kHeapObjectTag));
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->value()).is(ebx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(eax, Immediate(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  Callable callable = CodeFactory::Typeof(isolate());
  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance =
      left_block == next_block ? Label::kNear : Label::kFar;
  Label::Distance false_distance = right_block == next_block ?
      Label::kNear : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (String::Equals(type_name, factory()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->null_value());
    __ j(equal, false_label, false_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    __ cmp(input, 1 << Map::kIsCallable);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
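    // Pad with nops until at least space_needed bytes separate the current
    // position from the last recorded lazy-deopt position.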
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
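    // Compare esp against the stack limit and take the deferred path (which
    // calls Runtime::kStackGuard) only when the limit has been reached.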
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::kZero));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index,
                              const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index, x87_stack_);

  Label out_of_object, done;
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to out of object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87