// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/crankshaft/x64/lithium-codegen-x64.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  // Nothing to do before the call; the safepoint is recorded after it.
  void BeforeCall(int call_size) const override {}

  // Record the safepoint immediately after the generated call instruction.
  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

// Top-level driver: runs each code-generation phase in order and stops at
// the first phase that fails.
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


// Attach frame, safepoint, and deoptimization metadata to the finished
// Code object.
void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


#ifdef _MSC_VER
// Touch each 4KB page of the newly reserved stack area from the top down.
// NOTE(review): presumably this forces Windows to commit the stack guard
// pages sequentially before arbitrary slot accesses — confirm against the
// Windows stack-probing rules.
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}
#endif


// Spill every allocated XMM register into the reserved frame slots, in
// bit-vector iteration order so offsets match RestoreCallerDoubles().
void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ Movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::from_code(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


// Reload the XMM registers saved by SaveCallerDoubles(), using the same
// bit-vector iteration order so each register gets its own slot back.
void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ Movsd(XMMRegister::from_code(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


// Emit the function prologue: entry hook, frame construction, spill-slot
// reservation (zapped in debug mode), and caller-double saving.
bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // In debug builds, fill every reserved slot with kSlotsZapValue so a
      // read of an uninitialized spill slot is recognizable.
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ Push(rax);
      __ Set(rax, slots);
      __ Set(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }
  return !is_aborted();
}


// Allocate and initialize the function's local context, if it needs one,
// and copy context-allocated parameters from the stack into it.
void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in rdi.
    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ Push(rdi);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in rax. It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    // Index -1 denotes the receiver when it is context-allocated.
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}


// Per-instruction hook run before each LInstruction: make sure call sites
// leave room for lazy-deopt patching and keep the lazy safepoint index fresh.
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


// Per-instruction hook run after each LInstruction: debug-check zero
// extension of int32 results and sign-extend dehoisted keys where required.
void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));
  }

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    // We sign extend the dehoisted key at the definition point when the pointer
    // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
    // points and MustSignExtendResult is always false. We can't use
    // STATIC_ASSERT here as the pointer size is 32-bit for x32.
    DCHECK(kPointerSize == kInt64Size);
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32bit result in the stack slots.
      DCHECK(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}


// Emit the shared deoptimization jump table. Entries that need a frame
// funnel through a single needs_frame trampoline that builds a stub frame.
bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() == 0) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      // Pass the deopt entry address to the trampoline in kScratchRegister.
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      __ call(&needs_frame);
    } else {
      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }

  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
    /* stack layout
       3: return address  <-- rsp
       2: garbage
       1: garbage
       0: garbage
    */
    // Reserve space for stub marker.
    __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
    __ Push(MemOperand(
        rsp, TypedFrameConstants::kFrameTypeSize));  // Copy return address.
    __ Push(kScratchRegister);

    /* stack layout
       3: return address
       2: garbage
       1: return address
       0: entry address  <-- rsp
    */

    // Create a stack frame.
    __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
    __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));

    /* stack layout
       3: old rbp
       2: stub marker
       1: return address
       0: entry address  <-- rsp
    */
    // "Return" to the entry address that was pushed above.
    __ ret(0);
  }

  return !is_aborted();
}


// Emit all deferred code stubs collected during the main body, building and
// tearing down a minimal stub frame around each one when needed.
bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


// Emit the accumulated safepoint table at the end of the code object.
bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


// Map an allocation index to the corresponding general-purpose register.
Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


// Map an allocation index to the corresponding XMM register.
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


// True if the constant operand holds a smi or int32 literal.
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


// True if the constant operand holds an external reference.
bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsExternal();
}


// True if the constant is a key that was dehoisted out of an array access.
bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
         chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


// Return the constant's value either as a raw int32 or, for smi/tagged
// representations, as the low 32 bits of its smi encoding (valid only
// when smis are 31 bits wide).
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant =
      chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


// rsp-relative offset of an incoming argument when no frame has been built.
// Argument slots use negative indices; the return address sits between rsp
// and the arguments, hence the kPCOnStackSize adjustment.
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, FrameSlotToFPOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


// Recursively serialize an environment chain (outermost frame first) into
// the deoptimization translation.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


// Emit one translation command for a single environment value, recursing
// into materialization markers for captured/arguments objects.
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    // Field values of dematerialized objects live past the end of the
    // regular translation values; index them from there.
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


// Emit a call to a code object and record the matching safepoint.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


// Convenience wrapper for the common simple-safepoint, zero-argument case.
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


// Emit a runtime call and record a simple safepoint for lazy deopt.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


// Materialize the context operand into rsi for a deferred-code runtime call.
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}



// Runtime call from deferred code: loads the context, calls with doubles
// saved, and records a register safepoint (no lazy deopt from here).
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


// Register an environment for deoptimization exactly once, writing its
// translation and remembering the pc offset for lazy deopts.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 .....................................................
    //                                                 size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


// Emit a conditional (or unconditional, when cc == no_condition) jump to the
// deoptimizer entry of the given bailout type, routing through the shared
// jump table when a frame must be built or caller doubles restored.
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  // Stress mode (--deopt-every-n-times): decrement a global counter and
  // force a deopt when it hits zero. Flags and rax are preserved around
  // the counter update via pushfq/pushq.
  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ pushq(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    // Counter reached zero: reset it and deopt unconditionally.
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


// Overload that picks the default bailout type: stubs deopt lazily,
// optimized functions eagerly.
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub() ?
      Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}


// Record a safepoint that supports lazy deoptimization, with or without
// live registers depending on the safepoint mode.
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


// Core safepoint recorder: defines the safepoint and registers every live
// tagged pointer (stack slots always, registers only for register kinds).
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


// Safepoint with no live pointers at all.
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


// Forward a source position to the assembler's position recorder.
void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


// Human-readable suffix for the block comment emitted at each label.
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


// Bind the basic-block label and process its gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


// Resolve the parallel moves at every inner position of a gap.
void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


// Modulus by a power-of-two constant; operates in place on the dividend.
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  // Mask of low bits: |divisor| - 1 (works for negative divisors too, since
  // the sign of the divisor does not affect the remainder's magnitude).
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}


// Modulus by an arbitrary non-zero constant: rax = dividend - (dividend /
// divisor) * divisor, using TruncatingDiv's multiply-based division.
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


// General modulus via idivl (remainder in rdx), with explicit handling of
// the two cases where idiv would fault: x % 0 and kMinInt % -1.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(rax));
  DCHECK(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(rdx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
    } else {
      // kMinInt % -1 == 0 mathematically; produce 0 without executing idiv.
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}


// Flooring division by a power-of-two constant; operates in place.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    // Arithmetic right shift is exactly flooring division for positive
    // power-of-two divisors.
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // negl set ZF iff the dividend was zero; 0 / negative would be -0.
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      // negl(kMinInt) overflows; OF is still valid here.
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // The negation may have overflowed (dividend was kMinInt); in that case the
  // quotient is the constant kMinInt / divisor, otherwise shift as usual.
  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}


// Flooring division by an arbitrary non-zero constant, built on top of
// TruncatingDiv (quotient delivered in rdx).
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  // Adjustment is needed exactly when dividend and divisor have opposite
  // signs (the truncated quotient is then one too large).
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  // Nudge the dividend toward zero, divide, then subtract one:
  // floor(a/b) == trunc((a + sign-adjust)/b) - 1 for opposite signs.
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ decl(rdx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
// Flooring division with a dynamic divisor, using idivl.
// Fixed registers: dividend/result in rax, remainder temp in rdx.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(result.is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    // Dividend is zero: deopt if the divisor is negative (SF from testl).
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  // Truncating quotient is in rax; if the remainder is non-zero and the
  // operands' signs differ, subtract one to get the flooring quotient.
  Label done;
  __ testl(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  // remainder ^ divisor has the sign bit set iff signs differ; arithmetic
  // shift turns that into 0 or -1, which is then added to the quotient.
  __ xorl(remainder, divisor);
  __ sarl(remainder, Immediate(31));
  __ addl(result, remainder);
  __ bind(&done);
}


// Truncating division by a power-of-two constant. The result register is
// distinct from the dividend, which must be preserved for the rounding add.
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    // Low bits of the dividend are the remainder for a power-of-two divisor.
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    // Add (2^shift - 1) to negative dividends so the shift truncates toward
    // zero instead of toward negative infinity.
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}


// Truncating division by an arbitrary non-zero constant via TruncatingDiv;
// quotient ends up in rdx.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Verify exactness: quotient * divisor must equal the dividend.
    // subl sets ZF, so not_equal fires iff there was a non-zero remainder.
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Truncating division with a dynamic divisor, using idivl.
// Fixed registers: dividend/result in rax, remainder in rdx.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(ToRegister(instr->result()).is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1), which would raise #DE in idiv.
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
}


// 32-bit / Smi multiply with overflow and -0 checks. The left operand is
// also the result; a pre-multiply copy of it is stashed in kScratchRegister
// when the -0 check later needs the original sign.
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        // lea/shl strength reduction for common small multipliers.
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      // Overflow possible: must use imull so OF is set correctly.
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      // Untag one operand first so the Smi product carries a single tag.
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as 32-bit Smi due to immediate size
      // limit.
      DCHECK(SmiValuesAre32Bits()
          ? !instr->hydrogen_value()->representation().IsSmi()
          : SmiValuesAre31Bits());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        // 0 * negative-constant is always -0.
        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        // negative-left * 0 is -0; the saved original left is in
        // kScratchRegister.
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
      }
    } else if (right->IsStackSlot()) {
      // Product is zero, so at least one operand is zero; OR-ing them leaves
      // the sign of the non-zero (or only) operand in SF.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}


// Bitwise AND/OR/XOR. 32-bit forms for Integer32 values, pointer-width forms
// otherwise (tagged Smi operands); left is also the result.
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        // XOR with all-ones is emitted as a one-byte-shorter notl.
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __
          andl(ToRegister(left), ToOperand(right));
        } else {
          __ andp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
        } else {
          __ orp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
        } else {
          __ xorp(ToRegister(left), ToOperand(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    DCHECK(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
        } else {
          __ andp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
        } else {
          __ orp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
        } else {
          __ xorp(ToRegister(left), ToRegister(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


// Shifts (ROR/SAR/SHR/SHL) with either a dynamic count in rcx or a constant
// count masked to 5 bits; left is also the result.
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    // Variable shift counts must live in rcx (cl) on x64.
    DCHECK(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          // A logical-shift result with the sign bit set is not representable
          // as an int32 result of >>>; deopt.
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    // JavaScript shift semantics: only the low 5 bits of the count are used.
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shrl(ToRegister(left), Immediate(shift_count));
        } else if (instr->can_deopt()) {
          // x >>> 0 keeps the value; still negative means not an int32.
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            if (SmiValuesAre32Bits()) {
              __ shlp(ToRegister(left), Immediate(shift_count));
            } else {
              DCHECK(SmiValuesAre31Bits());
              if (instr->can_deopt()) {
                // Shift by count-1, then let the tagging shift supply the
                // last bit so its OF reflects Smi overflow.
                if (shift_count != 1) {
                  __ shll(ToRegister(left), Immediate(shift_count - 1));
                }
                __ Integer32ToSmi(ToRegister(left), ToRegister(left));
                DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
              } else {
                __ shll(ToRegister(left), Immediate(shift_count));
              }
            }
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


// Integer/Smi subtraction; left is also the result. Deopts on overflow when
// the hydrogen instruction says overflow is possible.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    __ subl(ToRegister(left), Immediate(right_operand));
  } else if (right->IsRegister()) {
    if
       (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // OF from the subtraction above is still live here.
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}


// Materialize a 32-bit integer constant (xor for zero: shorter encoding,
// breaks false dependencies).
void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  if (instr->value() == 0) {
    __ xorl(dst, dst);
  } else {
    __ movl(dst, Immediate(instr->value()));
  }
}


// Materialize a Smi constant.
void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}


// Materialize a double constant from its raw bit pattern.
void LCodeGen::DoConstantD(LConstantD* instr) {
  __ Move(ToDoubleRegister(instr->result()), instr->bits());
}


// Materialize an external reference constant.
void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


// Materialize a tagged (handle) constant.
void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


// Builds the memory operand addressing character `index` of a sequential
// string: constant indices fold into the displacement, dynamic ones use
// scaled-index addressing (x1 for one-byte, x2 for two-byte).
Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


// Load (zero-extended) one character from a sequential string.
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    // Debug-only check that the string's instance type matches the static
    // encoding; `string` is clobbered for the check and restored via the
    // stack.
    __ Push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ Pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}


// Store one character into a sequential string.
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING ?
            one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}


// Integer/Smi/External addition. Uses lea for a three-operand, flag-free add
// when allowed; otherwise addl/addp with an overflow deopt if required.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  // Pointer-width ops for Smi-tagged and external (address) values.
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t offset =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
      if (is_p) {
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_p) {
        __ leap(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t right_operand =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
      if (is_p) {
        __ addp(ToRegister(left), Immediate(right_operand));
      } else {
        __ addl(ToRegister(left), Immediate(right_operand));
      }
    } else if (right->IsRegister()) {
      if (is_p) {
        __ addp(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_p) {
        __ addp(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // OF from the add above; lea would not have set it, hence this branch
      // only exists in the non-lea path.
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
  }
}


// Math.min / Math.max for SmiOrInteger32 and Double representations.
// Left is also the result. The double path handles NaN and ±0 explicitly.
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin) ?
        less_equal : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm = Immediate(
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation()));
      // 32-bit Smis cannot be encoded as immediates.
      DCHECK(SmiValuesAre32Bits()
          ? !instr->hydrogen()->representation().IsSmi()
          : SmiValuesAre31Bits());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movl(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    Label not_nan, distinct, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    // ucomisd sets PF when either operand is NaN (unordered).
    __ Ucomisd(left_reg, right_reg);
    __ j(parity_odd, &not_nan, Label::kNear);  // Both are not NaN.

    // One of the numbers is NaN. Find which one and return it.
    __ Ucomisd(left_reg, left_reg);
    __ j(parity_even, &return_left, Label::kNear);  // left is NaN.
    __ jmp(&return_right, Label::kNear);  // right is NaN.

    __ bind(&not_nan);
    __ j(not_equal, &distinct, Label::kNear);  // left != right.

    // left == right
    XMMRegister xmm_scratch = double_scratch0();
    __ Xorpd(xmm_scratch, xmm_scratch);
    __ Ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

    // At this point, both left and right are either +0 or -0.
    // min(+0, -0) is -0: OR the sign bits. max(+0, -0) is +0: AND them.
    if (operation == HMathMinMax::kMathMin) {
      __ Orpd(left_reg, right_reg);
    } else {
      __ Andpd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&distinct);
    __ j(condition, &return_left, Label::kNear);

    __ bind(&return_right);
    __ Movapd(left_reg, right_reg);

    __ bind(&return_left);
  }
}


// Double-precision arithmetic. Uses three-operand AVX forms when available,
// otherwise two-operand SSE2 (result aliased to left); MOD calls out to a
// C function.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vaddsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ addsd(left, right);
      }
      break;
    case Token::SUB:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vsubsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ subsd(left, right);
      }
      break;
    case Token::MUL:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vmulsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ mulsd(left, right);
      }
      break;
    case Token::DIV:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vdivsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ divsd(left, right);
      }
      // Don't delete this mov.
      // It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result
      __ Movapd(result, result);
      break;
    case Token::MOD: {
      // Runtime call: xmm0/xmm1 are the C-ABI argument registers, result in
      // xmm0.
      DCHECK(left.is(xmm0));
      DCHECK(right.is(xmm1));
      DCHECK(result.is(xmm0));
      __ PrepareCallCFunction(2);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()), 2);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


// Generic (tagged) binary arithmetic: delegate to the BinaryOpIC stub.
// Fixed calling convention: context in rsi, left in rdx, right/result in rax.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->left()).is(rdx));
  DCHECK(ToRegister(instr->right()).is(rax));
  DCHECK(ToRegister(instr->result()).is(rax));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


// Emit a conditional two-way branch, eliding jumps to the block that is
// emitted immediately after this one.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // Fall through to the true block; jump only when the condition fails.
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    // Fall through to the false block.
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    if (cc != always) {
      __ jmp(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


// Emit only the jump taken when the condition holds (true destination).
template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
  int true_block = instr->TrueDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(true_block));
}


// Emit only the jump taken when the condition holds (false destination).
template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(false_block));
}


// Emit a breakpoint instruction.
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


// Branch on the boolean value of an arbitrary input, specialized by the
// statically known representation/type; falls back to a type-feedback-driven
// cascade of checks for generic tagged values.
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testl(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testp(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    // Doubles: false iff equal to +0/-0 (NaN compares unordered -> true path
    // via not_equal? -- ucomisd sets ZF for unordered too; here NaN takes the
    // not_equal... NOTE(review): NaN sets ZF, so NaN falls to the false side
    // of not_equal only via PF; confirm intended NaN handling upstream.
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ Xorpd(xmm_scratch, xmm_scratch);
    __ Ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      // A JSArray is always truthy.
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      XMMRegister xmm_scratch = double_scratch0();
      __ Xorpd(xmm_scratch, xmm_scratch);
      __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      // Strings are truthy iff non-empty.
      __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanICStub::Types expected =
          instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case
where we've never executed this path before. 2003 if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic(); 2004 2005 if (expected.Contains(ToBooleanICStub::UNDEFINED)) { 2006 // undefined -> false. 2007 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 2008 __ j(equal, instr->FalseLabel(chunk_)); 2009 } 2010 if (expected.Contains(ToBooleanICStub::BOOLEAN)) { 2011 // true -> true. 2012 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2013 __ j(equal, instr->TrueLabel(chunk_)); 2014 // false -> false. 2015 __ CompareRoot(reg, Heap::kFalseValueRootIndex); 2016 __ j(equal, instr->FalseLabel(chunk_)); 2017 } 2018 if (expected.Contains(ToBooleanICStub::NULL_TYPE)) { 2019 // 'null' -> false. 2020 __ CompareRoot(reg, Heap::kNullValueRootIndex); 2021 __ j(equal, instr->FalseLabel(chunk_)); 2022 } 2023 2024 if (expected.Contains(ToBooleanICStub::SMI)) { 2025 // Smis: 0 -> false, all other -> true. 2026 __ Cmp(reg, Smi::FromInt(0)); 2027 __ j(equal, instr->FalseLabel(chunk_)); 2028 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2029 } else if (expected.NeedsMap()) { 2030 // If we need a map later and have a Smi -> deopt. 2031 __ testb(reg, Immediate(kSmiTagMask)); 2032 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); 2033 } 2034 2035 const Register map = kScratchRegister; 2036 if (expected.NeedsMap()) { 2037 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset)); 2038 2039 if (expected.CanBeUndetectable()) { 2040 // Undetectable -> false. 2041 __ testb(FieldOperand(map, Map::kBitFieldOffset), 2042 Immediate(1 << Map::kIsUndetectable)); 2043 __ j(not_zero, instr->FalseLabel(chunk_)); 2044 } 2045 } 2046 2047 if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) { 2048 // spec object -> true. 2049 __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE); 2050 __ j(above_equal, instr->TrueLabel(chunk_)); 2051 } 2052 2053 if (expected.Contains(ToBooleanICStub::STRING)) { 2054 // String value -> false iff empty. 
2055 Label not_string; 2056 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); 2057 __ j(above_equal, ¬_string, Label::kNear); 2058 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); 2059 __ j(not_zero, instr->TrueLabel(chunk_)); 2060 __ jmp(instr->FalseLabel(chunk_)); 2061 __ bind(¬_string); 2062 } 2063 2064 if (expected.Contains(ToBooleanICStub::SYMBOL)) { 2065 // Symbol value -> true. 2066 __ CmpInstanceType(map, SYMBOL_TYPE); 2067 __ j(equal, instr->TrueLabel(chunk_)); 2068 } 2069 2070 if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) { 2071 // SIMD value -> true. 2072 __ CmpInstanceType(map, SIMD128_VALUE_TYPE); 2073 __ j(equal, instr->TrueLabel(chunk_)); 2074 } 2075 2076 if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) { 2077 // heap number -> false iff +0, -0, or NaN. 2078 Label not_heap_number; 2079 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2080 __ j(not_equal, ¬_heap_number, Label::kNear); 2081 XMMRegister xmm_scratch = double_scratch0(); 2082 __ Xorpd(xmm_scratch, xmm_scratch); 2083 __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); 2084 __ j(zero, instr->FalseLabel(chunk_)); 2085 __ jmp(instr->TrueLabel(chunk_)); 2086 __ bind(¬_heap_number); 2087 } 2088 2089 if (!expected.IsGeneric()) { 2090 // We've seen something for the first time -> deopt. 2091 // This can only happen if we are not generic already. 
2092 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject); 2093 } 2094 } 2095 } 2096 } 2097 2098 2099 void LCodeGen::EmitGoto(int block) { 2100 if (!IsNextEmittedBlock(block)) { 2101 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); 2102 } 2103 } 2104 2105 2106 void LCodeGen::DoGoto(LGoto* instr) { 2107 EmitGoto(instr->block_id()); 2108 } 2109 2110 2111 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2112 Condition cond = no_condition; 2113 switch (op) { 2114 case Token::EQ: 2115 case Token::EQ_STRICT: 2116 cond = equal; 2117 break; 2118 case Token::NE: 2119 case Token::NE_STRICT: 2120 cond = not_equal; 2121 break; 2122 case Token::LT: 2123 cond = is_unsigned ? below : less; 2124 break; 2125 case Token::GT: 2126 cond = is_unsigned ? above : greater; 2127 break; 2128 case Token::LTE: 2129 cond = is_unsigned ? below_equal : less_equal; 2130 break; 2131 case Token::GTE: 2132 cond = is_unsigned ? above_equal : greater_equal; 2133 break; 2134 case Token::IN: 2135 case Token::INSTANCEOF: 2136 default: 2137 UNREACHABLE(); 2138 } 2139 return cond; 2140 } 2141 2142 2143 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2144 LOperand* left = instr->left(); 2145 LOperand* right = instr->right(); 2146 bool is_unsigned = 2147 instr->is_double() || 2148 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2149 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2150 Condition cc = TokenToCondition(instr->op(), is_unsigned); 2151 2152 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2153 // We can statically evaluate the comparison. 2154 double left_val = ToDouble(LConstantOperand::cast(left)); 2155 double right_val = ToDouble(LConstantOperand::cast(right)); 2156 int next_block = Token::EvalComparison(instr->op(), left_val, right_val) 2157 ? 
instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ Cmp(ToRegister(left), Smi::FromInt(value));
        } else {
          __ cmpl(ToRegister(left), Immediate(value));
        }
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (right->IsRegister()) {
            __ Cmp(ToRegister(right), Smi::FromInt(value));
          } else {
            __ Cmp(ToOperand(right), Smi::FromInt(value));
          }
        } else if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We commuted the operands, so commute the condition.
        cc = CommuteCondition(cc);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (right->IsRegister()) {
          __ cmpp(ToRegister(left), ToRegister(right));
        } else {
          __ cmpp(ToRegister(left), ToOperand(right));
        }
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(instr, cc);
  }
}


// Reference equality; handle constants are embedded directly in the code.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ Cmp(left, right);
  } else {
    Register right = ToRegister(instr->right());
    __ cmpp(left, right);
  }
  EmitBranch(instr, equal);
}


// Branches on whether the value is the hole. Tagged values compare against
// the hole object; doubles check the hole NaN bit pattern.
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ Cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  // Non-NaN doubles cannot be the hole; parity_odd means ordered here.
  __ Ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  // Spill the double just below rsp, then inspect its upper 32 bits.
  __ subp(rsp, Immediate(kDoubleSize));
  __ Movsd(MemOperand(rsp, 0), input_reg);
  __ addp(rsp, Immediate(kDoubleSize));

  int offset = sizeof(kHoleNanUpper32);
  // NOTE: reads below rsp after the addp — relies on the x64 ABI red zone.
  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}


// Jumps to |is_not_string| for smis (when requested) and returns the
// condition that holds when |input| is a string; the map check also
// leaves the instance type in |temp1|.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}
// Branches to true when the value is a string; smis fall to the false
// block unless the type is already known to be a heap object.
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Condition is_smi;
  if (instr->value()->IsRegister()) {
    Register input = ToRegister(instr->value());
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->value());
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(instr, is_smi);
}


// Branches to true when the value's map has the undetectable bit set.
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}


// Calls the StringCompare stub (fixed rdx/rax inputs per its descriptor)
// and branches on whether it returned the true value.
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->left()).is(rdx));
  DCHECK(ToRegister(instr->right()).is(rax));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ CompareRoot(rax, Heap::kTrueValueRootIndex);
  EmitBranch(instr, equal);
}


// Picks the instance type to compare against for the [from, to] range
// encoded in the instruction; pairs with BranchCondition below.
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


// Picks the condition matching TestType's choice for the same range.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


// Extracts the cached array index from a string's hash field.
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  DCHECK(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  // The mask bits are clear (zero -> "equal") when the string contains a
  // cached array index.
  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  // Anything in the function instance-type range has class 'Function'.
  __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ j(above_equal, is_true);
  } else {
    __ j(above_equal, is_false);
  }

  // Check if the constructor in the map is a function.
  __ GetMapConstructor(temp, temp, kScratchRegister);

  // Objects with a non-function constructor have class 'Object'.
  __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movp(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  DCHECK(class_name->IsInternalizedString());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  // EmitClassOfTest leaves the answer in the z flag.
  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


// Walks the prototype chain of {object} looking for {prototype};
// deoptimizes on access-checked maps and proxies (continued below).
void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = kScratchRegister;
  Register const object_prototype = object_map;  // Aliases object_map.
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    Condition is_smi = __ CheckSmi(object);
    EmitFalseBranch(instr, is_smi);
  }

  // Loop through the {object}s prototype chain looking for the {prototype}.
  __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
  // Deoptimize for proxies.
  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);

  // Advance one step along the chain: null terminates with false, a match
  // terminates with true, otherwise loop on the next map.
  __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, equal);
  __ cmpp(object_prototype, prototype);
  EmitTrueBranch(instr, equal);
  __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
  __ jmp(&loop);
}


// Generic compare: calls the CompareIC, then materializes true/false from
// the flag state the IC leaves after testing rax.
void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ testp(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


// Tears down the frame (if any), restores caller doubles, and returns,
// dropping the arguments plus the receiver.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register.  We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(rax);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  if (NeedsEagerFrame()) {
    __ movp(rsp, rbp);
    __ popq(rbp);
  }
  if (instr->has_constant_parameter_count()) {
    // "+ 1" drops the receiver in addition to the parameters.
    __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
           rcx);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi
    __ SmiToInteger32(reg, reg);
    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
    __ PopReturnAddressTo(return_addr_reg);
    __ shlp(reg, Immediate(kPointerSizeLog2));
    __ addp(rsp, reg);
    __ jmp(return_addr_reg);
  }
}


// Loads the type feedback vector and slot index into the registers the
// vector-based load IC expects (continued below).
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(rax));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Move(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ Move(slot_register, Smi::FromInt(index));
}


// Like EmitVectorLoadICRegisters, but the slot register is an explicit
// temp of the instruction rather than a descriptor-fixed register.
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Move(vector_register, vector);
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ Move(slot_register, Smi::FromInt(index));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->result()).is(rax));

  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Loads a context slot; the hole either deoptimizes or is replaced with
// undefined, depending on the hydrogen instruction.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movp(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


// Stores into a context slot, with optional hole check and write barrier.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      // Holes in non-deopting slots leave the slot untouched.
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movp(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->temp());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


// Loads a named field: external memory, in-object double, or (possibly
// out-of-object) tagged/int32 field (continued below).
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    if (instr->object()->IsConstantOperand()) {
      DCHECK(result.is(rax));
      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
    } else {
      Register object = ToRegister(instr->object());
      __ Load(result, MemOperand(object, offset), access.representation());
    }
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    DCHECK(access.IsInobject());
    XMMRegister result = ToDoubleRegister(instr->result());
    __ Movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    // Out-of-object properties live in the properties backing store.
    __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation =
      access.representation();
  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->representation().IsInteger32()) {
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch, FieldOperand(object, offset), representation);
      __ AssertSmi(scratch);
    }

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }
  __ Load(result, FieldOperand(object, offset), representation);
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(rax));

  __ Move(LoadDescriptor::NameRegister(), instr->name());
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Loads a function's prototype, dereferencing the initial map when the
// function has one; deoptimizes when neither is present (hole).
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ movp(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr, Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


// Loads an argument relative to the frame pointer captured by
// LArgumentsElements. Constant index/length pairs are resolved at compile
// time; out-of-range constants emit int3 in debug code only.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (const_index >= 0 && const_index < const_length) {
      StackArgumentsAccessor args(arguments, const_length,
                                  ARGUMENTS_DONT_CONTAIN_RECEIVER);
      __ movp(result, args.GetArgumentOperand(const_index));
    } else if (FLAG_debug_code) {
      __ int3();
    }
  } else {
    Register length = ToRegister(instr->length());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them, add one more.
    if (instr->index()->IsRegister()) {
      __ subl(length, ToRegister(instr->index()));
    } else {
      __ subl(length, ToOperand(instr->index()));
    }
    StackArgumentsAccessor args(arguments, length,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(result, args.GetArgumentOperand(0));
  }
}


// Keyed load from a typed array backing store; dispatches on elements
// kind to pick the right-width (and signedness) load.
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    Representation key_representation =
        instr->hydrogen()->key()->representation();
    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
      __ SmiToInteger64(key_reg, key_reg);
    } else if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));

  if (elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ Cvtss2sd(result, operand);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ Movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ movsxbl(result, operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzxbl(result, operand);
        break;
      case INT16_ELEMENTS:
        __ movsxwl(result, operand);
        break;
      case UINT16_ELEMENTS:
        __ movzxwl(result, operand);
        break;
      case INT32_ELEMENTS:
        __ movl(result, operand);
        break;
      case UINT32_ELEMENTS:
        __ movl(result, operand);
        // A uint32 value with the sign bit set cannot be represented as
        // an int32 result, so deopt unless it is known to be treated as
        // uint32 downstream.
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ testl(result, result);
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


// Keyed load from a FixedDoubleArray, with optional hole-NaN check.
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  XMMRegister result(ToDoubleRegister(instr->result()));
  LOperand* key = instr->key();
  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
      instr->hydrogen()->IsDehoisted()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits
    __ movsxlq(ToRegister(key), ToRegister(key));
  }
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // The hole is identified by its upper 32 bits, located just past the
    // lower word of the double.
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(),
        key,
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + sizeof(kHoleNanLower32));
    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
  __ Movsd(result, double_load_operand);
}


// Keyed load from a FixedArray (tagged or unboxed-int32 smi elements);
// continued below.
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register result =
      ToRegister(instr->result());
  LOperand* key = instr->key();
  bool requires_hole_check = hinstr->RequiresHoleCheck();
  Representation representation = hinstr->representation();
  int offset = instr->base_offset();

  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
      instr->hydrogen()->IsDehoisted()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits
    __ movsxlq(ToRegister(key), ToRegister(key));
  }
  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!requires_hole_check);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch,
              BuildFastArrayOperand(instr->elements(),
                                    key,
                                    instr->hydrogen()->key()->representation(),
                                    FAST_ELEMENTS,
                                    offset),
              Representation::Smi());
      __ AssertSmi(scratch);
    }
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  __ Load(result,
          BuildFastArrayOperand(instr->elements(), key,
                                instr->hydrogen()->key()->representation(),
                                FAST_ELEMENTS, offset),
          representation);

  // Check for the hole value.
  if (requires_hole_check) {
    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
      Condition smi = __ CheckSmi(result);
      DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
    } else {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    }
  } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    __ j(not_equal, &done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ Cmp(FieldOperand(result, Cell::kValueOffset),
             Smi::FromInt(Isolate::kArrayProtectorValid));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
    }
    __ Move(result, isolate()->factory()->undefined_value());
    __ bind(&done);
  }
}


// Dispatches a keyed load to the typed-array, double-array, or fixed-array
// specialization above.
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


// Builds the memory operand for an element access: base register plus
// either a folded constant index or a scaled key register (continued).
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
  if (key->IsConstantOperand()) {
    int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      // Shifted constant index would overflow the 32-bit displacement.
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   (constant_value << shift_size) + offset);
  } else {
    // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
    DCHECK(key_representation.IsInteger32());

    // The shift size maps directly onto the hardware scale factor (1/2/4/8).
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset);
  }
}


// Generic (megamorphic) keyed load: registers are fixed by the load IC
// calling convention, then the KeyedLoadIC stub is invoked.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Computes the frame pointer of the frame that holds this function's
// arguments: the inlined fake frame, the arguments adaptor frame (if one is
// present below us), or our own frame.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // Inlined frames have no real frame; point just below the return
    // address / saved fp that a real frame would have.
    __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ movp(result, rbp);
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ bind(&done);
  } else {
    __ movp(result, rbp);
  }
}


// Loads the number of actual arguments: the static parameter count when no
// adaptor frame exists, otherwise the length slot of the adaptor frame.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // instr->elements() holds the frame pointer computed by
  // DoArgumentsElements; if it equals rbp there is no adaptor frame.
  if (instr->elements()->IsRegister()) {
    __ cmpp(rbp, ToRegister(instr->elements()));
  } else {
    __ cmpp(rbp, ToOperand(instr->elements()));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}


// Implements the receiver-wrapping part of Function.prototype.apply/call:
// null/undefined receivers become the global proxy for sloppy-mode,
// non-native functions; other primitives deoptimize.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ movp(kScratchRegister,
            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ testb(FieldOperand(kScratchRegister,
                          SharedFunctionInfo::kStrictModeByteOffset),
             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ testb(FieldOperand(kScratchRegister,
                          SharedFunctionInfo::kNativeByteOffset),
             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  // Fetch the global proxy through the function's own (native) context so
  // cross-context calls get the right global object.
  __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
  __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));

  __ bind(&receiver_ok);
}


// Implements Function.prototype.apply: pushes up to kArgumentsLimit
// arguments from the (possibly adapted) frame and invokes the function.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  DCHECK(receiver.is(rax));  // Used for parameter count.
  DCHECK(function.is(rdi));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(rax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpp(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);

  __ Push(receiver);
  // rax must carry the argument count for InvokeFunction below; length is
  // free to be decremented by the copy loop.
  __ movp(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  StackArgumentsAccessor args(elements, length,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ Push(args.GetArgumentOperand(0));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(rax);
    // It is safe to use rbx, rcx and r8 as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) rbx (expected number of arguments) will be initialized below.
    PrepareForTailCall(actual, rbx, rcx, r8);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  // The safepoint generator records a lazy-deopt safepoint right after the
  // call emitted inside InvokeFunction.
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


// Pushes a single tagged argument onto the stack.
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


// Drops instr->count() stack slots.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


// Loads the current JSFunction from the standard frame slot.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}


// Loads the current context, either from the frame or (for stubs without a
// frame) directly from rsi.
void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in rsi.
    DCHECK(result.is(rsi));
  }
}


// Declares global variables/functions via the runtime.
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  __ Push(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

// Calls (or tail-calls) a statically known JSFunction. When the actual
// argument count matches the formal count (or the function doesn't adapt
// arguments) the code entry is invoked directly, bypassing the arguments
// adaptor; otherwise the generic InvokeFunction path adapts the arguments.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = rdi;
  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
    __ Set(rax, arity);

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function.
    if (is_self_call) {
      // Recursive self-call: target the code object currently being
      // generated instead of reading the (not yet finalized) code entry.
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
      if (is_tail_call) {
        __ Jump(target);
      } else {
        __ Call(target);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
    }
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ?
    JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
  }
}


// Calls (or tail-calls) a code object described by a call descriptor. The
// target may be a constant code handle or a register holding a code object.
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(rax));

  if (instr->hydrogen()->IsTailCall()) {
    // Tear down our frame first; a tail call never returns here, so no
    // safepoint is recorded on this path.
    if (NeedsEagerFrame()) __ leave();

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ jmp(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      // Skip the Code header to reach the first instruction.
      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code));
      __ call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(target);
    }
    // Record the safepoint for lazy deoptimization at the return address.
    generator.AfterCall();
  }
}


// Deferred (slow) path of Math.abs for a tagged input: the value must be a
// heap number; if it is negative, a fresh heap number with the sign bit
// cleared is allocated and stored back into the safepoint slot.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);

  Label slow, allocated, done;
  // Pick two scratch registers out of {rax, rcx, rdx, rbx}, avoiding the
  // input register (and the context register, below).
  uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
  available_regs &= ~input_reg.bit();
  if
      (instr->context()->IsRegister()) {
    // Make sure that the context isn't overwritten in the AllocateHeapNumber
    // macro below.
    available_regs &= ~ToRegister(instr->context()).bit();
  }

  // Lowest set bit of the mask selects each scratch register.
  Register tmp =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
  available_regs &= ~tmp.bit();
  Register tmp2 =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done);

  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(
      Runtime::kAllocateHeapNumber, 0, instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) __ movp(tmp, rax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  // Clear the IEEE-754 sign bit (bit 63) via shift-left-then-right and store
  // the result into the freshly allocated heap number.
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shlq(tmp2, Immediate(1));
  __ shrq(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  // Make the new heap number the instruction's result when the safepoint
  // registers are popped.
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


// In-place abs() on an int32 register; deopts on kMinInt (negl of kMinInt
// leaves the sign flag set).
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negl(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
  __ bind(&is_positive);
}


// In-place abs() on a smi register; deopts on the minimal smi value.
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testp(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negp(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
  __ bind(&is_positive);
}


// Emits Math.abs for double, int32, smi, and tagged representations; the
// tagged case smi-checks inline and defers heap numbers to the slow path.
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    // abs(x) = x & ~sign computed as andpd with (0.0 - x): the subtraction
    // produces a value whose magnitude bits combined with x's via AND clear
    // the sign bit.
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ Xorpd(scratch, scratch);
    __ Subsd(scratch, input_reg);
    __ Andpd(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}

// Math.floor with a double result: a single SSE4.1 round-toward-minus-
// infinity instruction.
void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  XMMRegister output_reg = ToDoubleRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  CpuFeatureScope scope(masm(), SSE4_1);
  __ Roundsd(output_reg, input_reg, kRoundDown);
}

// Math.floor with an int32 result. Uses roundsd when SSE4.1 is available,
// otherwise truncates and compensates; deopts on NaN, overflow, and
// (when required) minus zero.
void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
      // Moving the raw bits and subtracting 1 overflows only for the bit
      // pattern of -0.0 (0x8000000000000000, i.e. kMinInt64).
      __ Movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
    }
    __ Roundsd(xmm_scratch, input_reg, kRoundDown);
    __ Cvttsd2si(output_reg, xmm_scratch);
    // cvttsd2si signals out-of-range inputs with kMinInt (0x80000000);
    // cmpl with 1 then overflows exactly for that value.
    __ cmpl(output_reg, Immediate(0x1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ Xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
    __ Ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      // Input compares equal to zero: inspect the sign bit to tell -0 from +0.
      __ Movmskpd(output_reg, input_reg);
      __ testl(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
      __ Set(output_reg, 0);
      __ jmp(&done);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ Cvttsd2si(output_reg, input_reg);
    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ Cvttsd2si(output_reg, input_reg);
    __ Cvtlsi2sd(xmm_scratch, output_reg);
    __ Ucomisd(input_reg, xmm_scratch);
    // If truncation was exact we are done; otherwise subtract 1 to round
    // the negative value toward minus infinity.
    __ j(equal, &done, Label::kNear);
    __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

    __ bind(&done);
  }
}

// Math.round with a double result: round up, then subtract 1 if the result
// ended up more than 0.5 above the input (ties round toward +infinity).
void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister output_reg = ToDoubleRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  CpuFeatureScope scope(masm(), SSE4_1);
  Label done;
  __ Roundsd(output_reg, input_reg, kRoundUp);
  __ Move(xmm_scratch, -0.5);
  __ Addsd(xmm_scratch, output_reg);
  __ Ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &done, Label::kNear);
  // Rounding up went too far: ceil(x) - 0.5 > x, so the round value is
  // ceil(x) - 1.
  __ Move(xmm_scratch, 1.0);
  __ Subsd(output_reg, xmm_scratch);
  __ bind(&done);
}

// Math.round with an int32 result, split into three ranges:
// x >= 0.5 uses floor(x + 0.5); -0.5 < x < 0.5 rounds to zero (possibly
// deopting on -0); x <= -0.5 uses a truncate-and-compensate sequence.
void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  const XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
  static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5

  Label done, round_to_zero, below_one_half;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ movq(kScratchRegister, one_half);
  __ Movq(xmm_scratch, kScratchRegister);
  __ Ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ Addsd(xmm_scratch, input_reg);
  __ Cvttsd2si(output_reg, xmm_scratch);
  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x1));
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movq(kScratchRegister, minus_one_half);
  __ Movq(xmm_scratch, kScratchRegister);
  __ Ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ Movapd(input_temp, input_reg);  // Do not alter input_reg.
  __ Subsd(input_temp, xmm_scratch);
  __ Cvttsd2si(output_reg, input_temp);
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmpl(output_reg, Immediate(0x1));
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

  __ Cvtlsi2sd(xmm_scratch, output_reg);
  __ Ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done, dist);
  __ subl(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Negative inputs in this range would round to -0; check the sign bit of
    // the raw double and deopt if it is set.
    __ Movq(output_reg, input_reg);
    __ testq(output_reg, output_reg);
    DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
  }
  __ Set(output_reg, 0);
  __ bind(&done);
}


// Math.fround: round a double to float32 precision and widen it back.
void LCodeGen::DoMathFround(LMathFround* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister output_reg = ToDoubleRegister(instr->result());
  __ Cvtsd2ss(output_reg, input_reg);
  __ Cvtss2sd(output_reg, output_reg);
}


// Math.sqrt on a double, from either a register or a memory operand.
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  XMMRegister output = ToDoubleRegister(instr->result());
  if (instr->value()->IsDoubleRegister()) {
    XMMRegister input = ToDoubleRegister(instr->value());
    __ Sqrtsd(output, input);
  } else {
    Operand input = ToOperand(instr->value());
    __ Sqrtsd(output, input);
  }
}


// Math.pow(x, 0.5), which differs from sqrt only at -Infinity (see below).
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, double-precision
  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
  __ Movq(xmm_scratch, kScratchRegister);
  __ Ucomisd(xmm_scratch, input_reg);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  // 0.0 - (-Infinity) == +Infinity.
  __ Xorpd(input_reg, input_reg);
  __ Subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ Xorpd(xmm_scratch, xmm_scratch);
  __ Addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ Sqrtsd(input_reg, input_reg);
  __ bind(&done);
}


// Math.pow via MathPowStub, selecting the stub variant from the exponent's
// representation (smi/tagged/int32/double).
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.

  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    // A tagged exponent must be a smi or a heap number; anything else deopts.
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

// Math.cos via the C ieee754 implementation; argument and result in xmm0.
void LCodeGen::DoMathCos(LMathCos* instr) {
  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
  __ PrepareCallCFunction(1);
  __
      CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1);
}

// Math.exp via the C ieee754 implementation; argument and result in xmm0.
void LCodeGen::DoMathExp(LMathExp* instr) {
  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
  __ PrepareCallCFunction(1);
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1);
}

// Math.sin via the C ieee754 implementation; argument and result in xmm0.
void LCodeGen::DoMathSin(LMathSin* instr) {
  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
  __ PrepareCallCFunction(1);
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1);
}

// Math.log via the C ieee754 implementation; argument and result in xmm0.
void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
  __ PrepareCallCFunction(1);
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1);
}


// Math.clz32: count leading zeros of the int32 input.
void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ Lzcntl(result, input);
}

// Emits the frame-dropping prologue of a tail call: determines the caller's
// argument count (from the arguments adaptor frame if present, otherwise the
// literal's formal parameter count) and lets the macro assembler shift the
// frame so the callee reuses the caller's stack slots.
void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#if DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &no_arguments_adaptor, Label::kNear);

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ movp(rbp, scratch2);
  __ SmiToInteger32(
      caller_args_count_reg,
      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ jmp(&formal_parameter_count_loaded, Label::kNear);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ movp(caller_args_count_reg,
          Immediate(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
                        ReturnAddressState::kNotOnStack);
  Comment(";;; }");
}

// Invokes a function: the statically known case goes through
// CallKnownFunction, the unknown case through the generic InvokeFunction
// path; either may be a tail call.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->function()).is(rdi));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use rbx, rcx and r8 as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) rbx (expected number of arguments) will be initialized below.
    PrepareForTailCall(actual, rbx, rcx, r8);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(rdi, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


// Calls the Array constructor, picking the specialized stub for 0, 1, or N
// arguments and for the allocation site's elements kind.
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->constructor()).is(rdi));
  DCHECK(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  __ Move(rbx, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ?
          DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here
      // look at the first argument
      // A single non-zero argument is a requested length, which produces a
      // holey array; use the holey variant of the stub in that case.
      __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


// Calls a runtime function with the instruction's arity.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


// Stores a code object's entry address into a function's code-entry field.
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  // Skip the Code header to get the address of the first instruction.
  __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


// Computes base + offset, where offset is a constant or a register.
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __
        leap(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ leap(result, Operand(base, offset, times_1, 0));
  }
}


// Stores a named field, handling external memory, unboxed/boxed doubles,
// map transitions with write barriers, in-object vs. properties-backing-store
// slots, and int32-into-smi fast paths.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    // Store straight to an external (off-heap) address; no write barrier
    // is ever needed for these.
    DCHECK(!hinstr->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    if (instr->object()->IsConstantOperand()) {
      DCHECK(value.is(rax));
      LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    // Boxed double path: write into the HeapNumber-like storage in-object.
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ Movsd(FieldOperand(object, offset), value);
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
    if (!hinstr->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      __ Move(kScratchRegister, transition);
      __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           kScratchRegister,
                           temp,
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    // Out-of-object properties live in the properties backing store.
    write_register = ToRegister(instr->temp());
    __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      hinstr->value()->representation().IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch, FieldOperand(write_register, offset), representation);
      __ AssertSmi(scratch);
    }
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  Operand operand = FieldOperand(write_register, offset);

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ Movsd(operand, value);

  } else if (instr->value()->IsRegister()) {
    Register value = ToRegister(instr->value());
    __ Store(operand, value, representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      DCHECK(!hinstr->NeedsWriteBarrier());
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));

      } else {
        __ movl(operand, Immediate(value));
      }

    } else if (IsExternalConstant(operand_value)) {
      DCHECK(!hinstr->NeedsWriteBarrier());
      ExternalReference ptr = ToExternalReference(operand_value);
      __ Move(kScratchRegister, ptr);
      __ movp(operand, kScratchRegister);
    } else {
Handle<Object> handle_value = ToHandle(operand_value); 3887 DCHECK(!hinstr->NeedsWriteBarrier()); 3888 __ Move(operand, handle_value); 3889 } 3890 } 3891 3892 if (hinstr->NeedsWriteBarrier()) { 3893 Register value = ToRegister(instr->value()); 3894 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; 3895 // Update the write barrier for the object for in-object properties. 3896 __ RecordWriteField(write_register, 3897 offset, 3898 value, 3899 temp, 3900 kSaveFPRegs, 3901 EMIT_REMEMBERED_SET, 3902 hinstr->SmiCheckForWriteBarrier(), 3903 hinstr->PointersToHereCheckForValue()); 3904 } 3905 } 3906 3907 3908 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 3909 DCHECK(ToRegister(instr->context()).is(rsi)); 3910 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 3911 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 3912 3913 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr); 3914 3915 __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name()); 3916 Handle<Code> ic = 3917 CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode()) 3918 .code(); 3919 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3920 } 3921 3922 3923 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 3924 Representation representation = instr->hydrogen()->length()->representation(); 3925 DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); 3926 DCHECK(representation.IsSmiOrInteger32()); 3927 3928 Condition cc = instr->hydrogen()->allow_equality() ? 
below : below_equal;
  if (instr->length()->IsConstantOperand()) {
    // Comparison is emitted as (index cmp length), which is the commuted form
    // of the canonical (length cmp index) used below; flip the condition.
    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
    Register index = ToRegister(instr->index());
    if (representation.IsSmi()) {
      __ Cmp(index, Smi::FromInt(length));
    } else {
      __ cmpl(index, Immediate(length));
    }
    cc = CommuteCondition(cc);
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->length()->IsRegister()) {
      Register length = ToRegister(instr->length());
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(index));
      } else {
        __ cmpl(length, Immediate(index));
      }
    } else {
      Operand length = ToOperand(instr->length());
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(index));
      } else {
        __ cmpl(length, Immediate(index));
      }
    }
  } else {
    Register index = ToRegister(instr->index());
    if (instr->length()->IsRegister()) {
      Register length = ToRegister(instr->length());
      if (representation.IsSmi()) {
        __ cmpp(length, index);
      } else {
        __ cmpl(length, index);
      }
    } else {
      Operand length = ToOperand(instr->length());
      if (representation.IsSmi()) {
        __ cmpp(length, index);
      } else {
        __ cmpl(length, index);
      }
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    // The check was statically eliminated; under --debug-code verify it
    // anyway and trap (int3) instead of deoptimizing.
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}


// Stores into a typed-array (external/fixed typed) backing store, selecting
// the store instruction by element width and converting doubles to float32
// where needed.
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
    // x32 ABI: keys may need widening before 64-bit address arithmetic.
    Register key_reg = ToRegister(key);
    Representation key_representation =
        instr->hydrogen()->key()->representation();
    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
      __ SmiToInteger64(key_reg, key_reg);
    } else if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));

  if (elements_kind == FLOAT32_ELEMENTS) {
    // Narrow double to single precision in place before storing.
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ Cvtsd2ss(value, value);
    __ Movss(operand, value);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ Movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movb(operand, value);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movw(operand, value);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(operand, value);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


// Stores a double into a FixedDoubleArray, canonicalizing signalling NaNs
// first when required.
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  LOperand* key = instr->key();
  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
      instr->hydrogen()->IsDehoisted()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits
    __ movsxlq(ToRegister(key), ToRegister(key));
  }
  if (instr->NeedsCanonicalization()) {
    XMMRegister xmm_scratch = double_scratch0();
    // Turn potential sNaN value into qNaN.
    __ Xorpd(xmm_scratch, xmm_scratch);
    __ Subsd(value, xmm_scratch);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  __ Movsd(double_store_operand, value);
}


// Stores into a FixedArray. Smi-representation stores of int32 values write
// only the upper half of the 64-bit smi slot; heap-object values are followed
// by a write barrier.
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  LOperand* key = instr->key();
  int offset = instr->base_offset();
  Representation representation = hinstr->value()->representation();

  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
      instr->hydrogen()->IsDehoisted()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits
    __ movsxlq(ToRegister(key), ToRegister(key));
  }
  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch,
              BuildFastArrayOperand(instr->elements(),
                                    key,
                                    instr->hydrogen()->key()->representation(),
                                    FAST_ELEMENTS,
                                    offset),
              Representation::Smi());
      __ AssertSmi(scratch);
    }
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(),
                            key,
                            instr->hydrogen()->key()->representation(),
                            FAST_ELEMENTS,
                            offset);
  if (instr->value()->IsRegister()) {
    __ Store(operand, ToRegister(instr->value()), representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));

      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register elements = ToRegister(instr->elements());
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    DCHECK(!key->IsConstantOperand());
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    Register key_reg(ToRegister(key));
    __ leap(key_reg, operand);
    __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}


// Dispatches a keyed store to the typed-array, double-array, or fixed-array
// emitter based on the elements kind and value representation.
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


// Generic keyed store through the KeyedStoreIC; operands must already be in
// the registers the store descriptor expects.
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode())
                        .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Fast path for growing elements on out-of-bounds keyed stores: compares key
// against current capacity and falls through with the existing backing store
// in rax, or jumps to the deferred grow path.
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = rax;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  // Jump to the deferred grow path when key >= capacity; constant/register
  // operand combinations each get the cheapest available comparison.
  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
    __ j(less_equal, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ cmpl(ToRegister(key), Immediate(constant_capacity));
    __ j(greater_equal, deferred->entry());
  } else {
    __ cmpl(ToRegister(key), ToRegister(current_capacity));
    __ j(greater_equal, deferred->entry());
  }

  // In-bounds: result is the unchanged elements backing store.
  if (instr->elements()->IsRegister()) {
    __ movp(result, ToRegister(instr->elements()));
  } else {
    __ movp(result, ToOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


// Deferred slow path for DoMaybeGrowElements: calls the GrowArrayElements
// stub and deoptimizes if the stub returns a smi (elements went dictionary).
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = rax;
  __ Move(result, Smi::FromInt(0));

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsConstantOperand()) {
      LConstantOperand* constant_object =
          LConstantOperand::cast(instr->object());
      if (IsSmiConstant(constant_object)) {
        Smi* immediate = ToSmi(constant_object);
        __ Move(result, immediate);
      } else {
        Handle<Object> handle_value = ToHandle(constant_object);
        __ Move(result, handle_value);
      }
    } else if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ movp(result, ToOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
    } else {
      // The stub expects the key as a smi in rbx.
      __ Move(rbx, ToRegister(key));
      __ Integer32ToSmi(rbx, rbx);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  Condition is_smi = __ CheckSmi(result);
  DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
}


// Transitions an object's elements kind. Simple map changes just rewrite the
// map pointer (with write barrier); anything else calls the transition stub.
// Objects whose map doesn't match the expected original map are skipped.
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
    __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(rax));
    DCHECK(ToRegister(instr->context()).is(rsi));
    PushSafepointRegistersScope scope(this);
    __ Move(rbx, to_map);
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  }
  __ bind(&not_applicable);
}


// Deoptimizes if a JSArray is followed by an AllocationMemento, which would
// make in-place elements transitions observable by allocation-site tracking.
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


// String concatenation via StringAddStub; left in rdx, right in rax.
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->left()).is(rdx));
  DCHECK(ToRegister(instr->right()).is(rax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


// Loads the char code at string[index]; falls back to a deferred runtime call
// for string representations the inline generator cannot handle.
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


// Deferred slow path for DoStringCharCodeAt: calls the runtime and untags the
// resulting smi char code into the result register.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ Push(index);
  }
  CallRuntimeFromDeferred(
      Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
  __ AssertSmi(rax);
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}


// Maps a char code to a one-character string via the single-character string
// cache; cache misses and codes above the one-byte range go to the deferred
// runtime path.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  __ j(above, deferred->entry());
  __ movsxlq(char_code, char_code);
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movp(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  // An undefined cache entry means the string is not cached: fall back.
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


// Deferred slow path for DoStringCharFromCode: calls the runtime with the
// smi-tagged char code.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code =
      ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


// Converts a signed 32-bit integer (register or stack slot) to a double.
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


// Converts an unsigned 32-bit integer to a double.
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}


// Tags a signed int32 as a smi in place. With 32-bit smis this can never
// overflow; with 31-bit smis an overflow jumps to the deferred heap-number
// allocation path.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  if (SmiValuesAre32Bits()) {
    __ Integer32ToSmi(reg, reg);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
    __ Integer32ToSmi(reg, reg);
    __ j(overflow, deferred->entry());
    __ bind(deferred->exit());
  }
}


// Tags an unsigned int32 as a smi in place; values above Smi::kMaxValue go to
// the deferred heap-number allocation path.
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmpl(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ Integer32ToSmi(reg, reg);
  __ bind(deferred->exit());
}


// Deferred path shared by NumberTagI/NumberTagU: recovers the original int32
// value into an XMM register, allocates a HeapNumber (inline if possible,
// otherwise via the runtime), and stores the value into it.
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp1);
  XMMRegister temp_xmm = ToDoubleRegister(temp2);

  // Load value into temp_xmm which will be preserved across potential call to
  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
  // XMM registers on x64).
  if (signedness == SIGNED_INT32) {
    DCHECK(SmiValuesAre31Bits());
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiToInteger32(reg, reg);
    // Flip bit 31 to undo the overflow of the 31-bit smi tag and recover the
    // original value.
    __ xorl(reg, Immediate(0x80000000));
    __ Cvtlsi2sd(temp_xmm, reg);
  } else {
    DCHECK(signedness == UNSIGNED_INT32);
    __ LoadUint32(temp_xmm, reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // Put a valid pointer value in the stack slot where the result
    // register is stored, as this register is in the pointer map, but contains
    // an integer value.
    __ Set(reg, 0);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagIU uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, rax);
  }

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}


// Boxes a double into a freshly allocated HeapNumber; allocation failure (or
// --no-inline-new) goes through the deferred runtime path.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


// Deferred slow path for DoNumberTagD: allocates the HeapNumber via the
// runtime; the caller stores the double payload afterwards.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    // Carry the result out of the safepoint scope in kScratchRegister, which
    // is not part of the safepoint register set.
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


// Tags an int32 as a smi, deoptimizing on overflow when the hydrogen
// instruction says overflow is possible (uint32 values are range-checked
// before tagging instead).
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
  }
  __ Integer32ToSmi(output, input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}


// Untags a smi in place; with needs_check, non-smi input deoptimizes.
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}


// Converts a tagged number (smi or HeapNumber; optionally undefined -> NaN)
// to a double, deoptimizing on anything else and, optionally, on -0.
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                XMMRegister result_reg, NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
4675 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), 4676 Heap::kHeapNumberMapRootIndex); 4677 4678 // On x64 it is safe to load at heap number offset before evaluating the map 4679 // check, since all heap objects are at least two words long. 4680 __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); 4681 4682 if (can_convert_undefined_to_nan) { 4683 __ j(not_equal, &convert, Label::kNear); 4684 } else { 4685 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); 4686 } 4687 4688 if (deoptimize_on_minus_zero) { 4689 XMMRegister xmm_scratch = double_scratch0(); 4690 __ Xorpd(xmm_scratch, xmm_scratch); 4691 __ Ucomisd(xmm_scratch, result_reg); 4692 __ j(not_equal, &done, Label::kNear); 4693 __ Movmskpd(kScratchRegister, result_reg); 4694 __ testl(kScratchRegister, Immediate(1)); 4695 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); 4696 } 4697 __ jmp(&done, Label::kNear); 4698 4699 if (can_convert_undefined_to_nan) { 4700 __ bind(&convert); 4701 4702 // Convert undefined (and hole) to NaN. Compute NaN as 0/0. 4703 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); 4704 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); 4705 4706 __ Pcmpeqd(result_reg, result_reg); 4707 __ jmp(&done, Label::kNear); 4708 } 4709 } else { 4710 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4711 } 4712 4713 // Smi to XMM conversion 4714 __ bind(&load_smi); 4715 __ SmiToInteger32(kScratchRegister, input_reg); 4716 __ Cvtlsi2sd(result_reg, kScratchRegister); 4717 __ bind(&done); 4718 } 4719 4720 4721 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { 4722 Register input_reg = ToRegister(instr->value()); 4723 4724 if (instr->truncating()) { 4725 Label no_heap_number, check_bools, check_false; 4726 4727 // Heap number map check. 
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    DeoptimizeIf(not_equal, instr,
                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ Set(input_reg, 0);
  } else {
    XMMRegister scratch = ToDoubleRegister(instr->temp());
    DCHECK(!scratch.is(double_scratch0()));
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ Movsd(double_scratch0(),
             FieldOperand(input_reg, HeapNumber::kValueOffset));
    // Round-trip through int32 and compare: inequality means the double had
    // a fractional part or was out of range; parity flags a NaN compare.
    __ Cvttsd2si(input_reg, double_scratch0());
    __ Cvtlsi2sd(scratch, input_reg);
    __ Ucomisd(double_scratch0(), scratch);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      // Result 0 could have come from -0.0: check the sign bit of the
      // original double and deoptimize if it is set.
      __ testl(input_reg, input_reg);
      __ j(not_zero, done);
      __ Movmskpd(input_reg, double_scratch0());
      __ andl(input_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
  }
}


// Converts a tagged value to an int32 in place. Smis are untagged inline;
// everything else goes through the DoDeferredTaggedToI slow path.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public
      LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    // Input is statically known to be a Smi: untag unconditionally.
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


// Converts a tagged value to an untagged double, picking the untag mode from
// the statically known representation of the input.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ?
        NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


// Converts a double to int32. Truncating mode uses the lossy truncation
// helper; otherwise the conversion deoptimizes on lost precision, NaN and
// (depending on the minus-zero mode) -0.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    XMMRegister xmm_scratch = double_scratch0();
    // With deopt-every-n stress testing the deopt sequences grow, so near
    // jumps may no longer reach; fall back to far jumps in that mode.
    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
                 &is_nan, &minus_zero, dist);
    __ jmp(&done, dist);
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}


// Converts a double to a Smi, deoptimizing on lost precision, NaN, -0 and
// Smi-range overflow.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  XMMRegister xmm_scratch = double_scratch0();
  Label::Distance dist = DeoptEveryNTimes() ?
      Label::kFar : Label::kNear;
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
               &minus_zero, dist);
  __ jmp(&done, dist);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}


// Deoptimizes when the input is not a Smi.
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
}


// Deoptimizes when the input IS a Smi, unless it is statically known to be a
// heap object (in which case no code is emitted).
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
  }
}


// Deoptimizes when the array buffer backing the given view has been
// neutered (detached).
void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());

  __ movp(kScratchRegister,
          FieldOperand(view, JSArrayBufferView::kBufferOffset));
  __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
           Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}


// Deoptimizes when the input's instance type is outside the expected
// interval, or does not match the expected mask/tag pattern.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __
        cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
             Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      // Single-bit mask: a testb suffices; the expected tag decides which
      // flag outcome deoptimizes.
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   Deoptimizer::kWrongInstanceType);
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}


// Deoptimizes unless the input is exactly the expected object.
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}


// Slow path for map checks with a migration target: asks the runtime to
// migrate the instance. A Smi return value signals that migration failed,
// which deoptimizes.
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    // No context needed for this runtime call.
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ testp(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr,
               Deoptimizer::kInstanceMigrationFailed);
}


// Compares the input's map against the expected set of maps. Pure stability
// checks only register compilation dependencies and emit no code. With a
// migration target, a failed match first tries instance migration via the
// deferred path before giving up; otherwise a mismatch deoptimizes.
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      // Re-run the whole map check after migration.
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  // All but the last map can fall through to the next comparison; only the
  // final mismatch triggers migration or deoptimization.
  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}


// Clamps an untagged double to the uint8 range [0, 255].
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register
      result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


// Clamps an int32 to the uint8 range [0, 255], in place.
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


// Clamps a tagged value (Smi, heap number, or undefined) to the uint8 range,
// in place. Undefined clamps to zero; any other non-number deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  // Deopt-every-n stress mode inflates code between jumps; use far jumps.
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for heap number
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ xorl(input_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


// Extracts the requested 32-bit half of a double's bit pattern: the high
// word (sign/exponent/upper mantissa) or the low word.
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Movq(result_reg, value_reg);
    __ shrq(result_reg, Immediate(32));
  } else {
    __ Movd(result_reg, value_reg);
  }
}


// Inline allocation. Fast path bumps the allocation top; on failure the
// deferred path calls into the runtime. Optionally prefills the new object
// with one-pointer filler maps.
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  // Folded (non-dominator) allocations go through DoFastAllocate instead.
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    // Fill the object with one-pointer filler maps, iterating from the last
    // word down to (and including) the first.
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sarp(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
            isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}

// Allocation inside a folded allocation group: the dominator has already
// reserved the memory, so this only bumps within the reserved region and
// cannot fail (no deferred path).
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags =
        static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, temp, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, temp, flags);
  }
}

// Slow path for DoAllocate: calls Runtime::kAllocateInTargetSpace with the
// (Smi-tagged) size and target-space flags, then stores the result back into
// the safepoint register slot for the result register.
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  // Runtime-call flags (AllocateTargetSpace encoding) — distinct from the
  // AllocationFlags used by the macro assembler above.
  int flags = 0;
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      // NOTE(review): this ORs the runtime-call |flags| (AllocateTargetSpace
      // encoding) into an AllocationFlags value. Only the PRETENURE bit is
      // consumed below, but using |allocation_flags| instead of |flags| here
      // would be cleaner — confirm against the other platform ports.
      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
    }
    // If the allocation folding dominator allocate triggered a GC, allocation
    // happened in the runtime. We have to reset the top pointer to virtually
    // undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    // Strip the tag so the raw object start can be stored as the new top.
    __ subp(rax, Immediate(kHeapObjectTag));
    __ Store(allocation_top, rax);
    __ addp(rax, Immediate(kHeapObjectTag));
  }
}


// Implements typeof: Smis short-circuit to "number"; everything else is
// dispatched to the TypeofStub.
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->value()).is(rbx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ Move(rax, isolate()->factory()->number_string());
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


// Pushes a tagged operand (constant, register, or stack slot) onto the stack.
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


// Branches on the result of a typeof comparison. EmitTypeofIs may resolve
// the branch entirely via jumps (returning no_condition), in which case no
// conditional branch is emitted here.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


// Emits the comparison for `typeof x == "<literal>"` and returns the
// condition on which the true branch should be taken (or no_condition if the
// outcome was fully decided by emitted jumps).
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block =
      instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  // Jumps to the block emitted immediately after this one can be near.
  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    // Smis and heap numbers are "number".
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    // null is its own thing ("object"), despite being undetectable.
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, false_label, false_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
    // Mask down to the two relevant bits, then require exactly "callable and
    // not undetectable".
    __ andb(input,
            Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ cmpb(input, Immediate(1 << Map::kIsCallable));
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // null is "object".
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    final_branch_condition = zero;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
  } else if (String::Equals(type_name, factory->type##_string())) {   \
    __ JumpIfSmi(input, false_label, false_distance);                 \
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),       \
                   Heap::k##Type##MapRootIndex);                      \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    // Unknown type literal: always false.
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


// Pads the instruction stream with nops so that the previous lazy-bailout
// point has at least |space_needed| bytes available for call patching.
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


// Records a lazy deoptimization point at the current pc.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


// Unconditional deoptimization.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


// Slow path for stack checks: calls Runtime::kStackGuard with registers
// saved and records the lazy-deopt index for the stack-check environment.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


// Emits a stack-limit check: at function entry it calls the StackCheck
// builtin inline; at backwards branches it jumps to a deferred runtime call.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


// Prepares for-in iteration: uses the receiver's map when the enum cache is
// valid, otherwise calls Runtime::kForInEnumerate.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


// Loads the enum cache array for a map (or the empty fixed array when the
// enum length is zero). Deoptimizes if no cache is present (result is a Smi).
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
}


// Deoptimizes when the object's map differs from the expected map register.
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}


// Slow path for LLoadFieldByIndex: loads a mutable-double field via
// Runtime::kLoadMutableDouble with registers saved.
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  // No context needed for this runtime call.
  __ xorp(rsi, rsi);
  __
      CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}


// Loads a field by its descriptor index encoding. Bit 0 of the Smi index
// flags a mutable-double field (handled by the deferred runtime path); the
// remaining bits are a signed offset — non-negative for in-object fields,
// negative for the out-of-object properties backing store.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

  Label out_of_object, done;
  // Test the mutable-double flag (bit 0 of the Smi payload).
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());

  // Strip the flag bit, then untag.
  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  // The -kPointerSize below compensates for the "plus 1" noted above.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64