// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_IA32

#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ic.h"
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/stub-cache.h"
#include "src/codegen.h"
#include "src/hydrogen-osr.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  // Record the safepoint right after the generated call instruction.
  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

// Top-level driver: emits prologue, instruction body, deferred code,
// deopt jump table and safepoint table, in that order.
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  // Dynamic realignment is only attempted for optimized code that either
  // spills more than two doubles in a non-recursive graph, or enters via OSR.
  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


// Finalizes the generated Code object: stack slot count, safepoint table
// offset, weak-object registration and deoptimization data.
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}


#ifdef _MSC_VER
// On Windows, stack pages must be touched in order (guard-page commit), so
// write to each page of the freshly reserved stack area, top-down.
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


// Spills every allocated XMM register to the reserved double-save area at esp.
void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(esp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


// Inverse of SaveCallerDoubles: reloads every allocated XMM register.
void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(esp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(ecx, Operand(esp, receiver_offset));

      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);

      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));

      __ mov(Operand(esp, receiver_offset), ecx);

      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Move(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      // Mark the now-vacated slot so the zap value can be recognized later.
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  ASSERT(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        // Fill the reserved slots with a recognizable zap value (debug only).
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    if (info()->saves_caller_doubles()) SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in eax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi,
                                    context_offset,
                                    eax,
                                    ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Move(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word.  The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
           5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));
}


// Hook run before each Lithium instruction: keeps room for lazy-deopt
// patching after calls and bumps the lazy safepoint index for everything
// that is neither a lazy bailout nor a gap.
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }


// Emits the out-of-line jump table that funnels conditional deopt checks
// to their Deoptimizer entry points, building a stub frame when needed.
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt, it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


// Emits all deferred (out-of-line) code chunks collected during the body,
// wrapping each in a stub frame when no eager frame exists.
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


// --- Conversions from Lithium operands to machine operands/values. ---

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


// Returns the constant either as a raw int32 or as a Smi-encoded word,
// depending on the requested representation.
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


// Offset of an argument slot relative to esp when no eager frame exists
// (negative Lithium indices denote arguments; +kPCOnStackSize skips the
// pushed return address).
static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


// Operand addressing the upper word of a double stack slot.
Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


// Recursively serializes an environment chain (outermost first) into the
// deoptimization translation stream.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


// Serializes a single environment value; materialization markers expand
// recursively into captured/arguments object descriptions.
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


// Calls a runtime function and records the corresponding lazy-deopt
// safepoint for the instruction.
void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  ASSERT(info()->is_calling());
}


// Materializes the given context operand into esi for a deferred-code call.
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  ASSERT(info()->is_calling());
}


// Builds and registers the deopt translation for an environment, once.
void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


// Emits a (possibly conditional) deoptimization exit to the entry of the
// given bailout type, going through the jump table when a frame must be
// built first.
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    // Stress mode: count down a global counter and force a deopt every
    // FLAG_deopt_every_n_times calls.
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


// Convenience overload: stubs deoptimize lazily, optimized code eagerly.
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


// Assembles all registered environments into a DeoptimizationInputData
// array and attaches it to the generated code object.
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


// Interns a literal in the deoptimization literal table, deduplicating by
// handle identity; returns its index.
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


// Pre-seeds the literal table with all inlined closures so their indices
// are stable before any translation references them.
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode
                                            safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


// Core safepoint recorder: registers every live stack slot (and, for
// register safepoints, every live register) from the pointer map.
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


// Safepoint with no live pointers at all.
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


// Suffix used in the disassembly comment for a basic-block label.
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


// Resolves the parallel moves at every inner position of a gap.
void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
1071 } 1072 1073 1074 void LCodeGen::DoCallStub(LCallStub* instr) { 1075 ASSERT(ToRegister(instr->context()).is(esi)); 1076 ASSERT(ToRegister(instr->result()).is(eax)); 1077 switch (instr->hydrogen()->major_key()) { 1078 case CodeStub::RegExpExec: { 1079 RegExpExecStub stub(isolate()); 1080 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1081 break; 1082 } 1083 case CodeStub::SubString: { 1084 SubStringStub stub(isolate()); 1085 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1086 break; 1087 } 1088 case CodeStub::StringCompare: { 1089 StringCompareStub stub(isolate()); 1090 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1091 break; 1092 } 1093 default: 1094 UNREACHABLE(); 1095 } 1096 } 1097 1098 1099 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { 1100 GenerateOsrPrologue(); 1101 } 1102 1103 1104 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { 1105 Register dividend = ToRegister(instr->dividend()); 1106 int32_t divisor = instr->divisor(); 1107 ASSERT(dividend.is(ToRegister(instr->result()))); 1108 1109 // Theoretically, a variation of the branch-free code for integer division by 1110 // a power of 2 (calculating the remainder via an additional multiplication 1111 // (which gets simplified to an 'and') and subtraction) should be faster, and 1112 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 1113 // indicate that positive dividends are heavily favored, so the branching 1114 // version performs better. 1115 HMod* hmod = instr->hydrogen(); 1116 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1117 Label dividend_is_not_negative, done; 1118 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1119 __ test(dividend, dividend); 1120 __ j(not_sign, ÷nd_is_not_negative, Label::kNear); 1121 // Note that this is correct even for kMinInt operands. 
1122 __ neg(dividend); 1123 __ and_(dividend, mask); 1124 __ neg(dividend); 1125 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1126 DeoptimizeIf(zero, instr->environment()); 1127 } 1128 __ jmp(&done, Label::kNear); 1129 } 1130 1131 __ bind(÷nd_is_not_negative); 1132 __ and_(dividend, mask); 1133 __ bind(&done); 1134 } 1135 1136 1137 void LCodeGen::DoModByConstI(LModByConstI* instr) { 1138 Register dividend = ToRegister(instr->dividend()); 1139 int32_t divisor = instr->divisor(); 1140 ASSERT(ToRegister(instr->result()).is(eax)); 1141 1142 if (divisor == 0) { 1143 DeoptimizeIf(no_condition, instr->environment()); 1144 return; 1145 } 1146 1147 __ TruncatingDiv(dividend, Abs(divisor)); 1148 __ imul(edx, edx, Abs(divisor)); 1149 __ mov(eax, dividend); 1150 __ sub(eax, edx); 1151 1152 // Check for negative zero. 1153 HMod* hmod = instr->hydrogen(); 1154 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1155 Label remainder_not_zero; 1156 __ j(not_zero, &remainder_not_zero, Label::kNear); 1157 __ cmp(dividend, Immediate(0)); 1158 DeoptimizeIf(less, instr->environment()); 1159 __ bind(&remainder_not_zero); 1160 } 1161 } 1162 1163 1164 void LCodeGen::DoModI(LModI* instr) { 1165 HMod* hmod = instr->hydrogen(); 1166 1167 Register left_reg = ToRegister(instr->left()); 1168 ASSERT(left_reg.is(eax)); 1169 Register right_reg = ToRegister(instr->right()); 1170 ASSERT(!right_reg.is(eax)); 1171 ASSERT(!right_reg.is(edx)); 1172 Register result_reg = ToRegister(instr->result()); 1173 ASSERT(result_reg.is(edx)); 1174 1175 Label done; 1176 // Check for x % 0, idiv would signal a divide error. We have to 1177 // deopt in this case because we can't return a NaN. 1178 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1179 __ test(right_reg, Operand(right_reg)); 1180 DeoptimizeIf(zero, instr->environment()); 1181 } 1182 1183 // Check for kMinInt % -1, idiv would signal a divide error. We 1184 // have to deopt if we care about -0, because we can't return that. 
1185 if (hmod->CheckFlag(HValue::kCanOverflow)) { 1186 Label no_overflow_possible; 1187 __ cmp(left_reg, kMinInt); 1188 __ j(not_equal, &no_overflow_possible, Label::kNear); 1189 __ cmp(right_reg, -1); 1190 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1191 DeoptimizeIf(equal, instr->environment()); 1192 } else { 1193 __ j(not_equal, &no_overflow_possible, Label::kNear); 1194 __ Move(result_reg, Immediate(0)); 1195 __ jmp(&done, Label::kNear); 1196 } 1197 __ bind(&no_overflow_possible); 1198 } 1199 1200 // Sign extend dividend in eax into edx:eax. 1201 __ cdq(); 1202 1203 // If we care about -0, test if the dividend is <0 and the result is 0. 1204 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1205 Label positive_left; 1206 __ test(left_reg, Operand(left_reg)); 1207 __ j(not_sign, &positive_left, Label::kNear); 1208 __ idiv(right_reg); 1209 __ test(result_reg, Operand(result_reg)); 1210 DeoptimizeIf(zero, instr->environment()); 1211 __ jmp(&done, Label::kNear); 1212 __ bind(&positive_left); 1213 } 1214 __ idiv(right_reg); 1215 __ bind(&done); 1216 } 1217 1218 1219 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1220 Register dividend = ToRegister(instr->dividend()); 1221 int32_t divisor = instr->divisor(); 1222 Register result = ToRegister(instr->result()); 1223 ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor))); 1224 ASSERT(!result.is(dividend)); 1225 1226 // Check for (0 / -x) that will produce negative zero. 1227 HDiv* hdiv = instr->hydrogen(); 1228 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1229 __ test(dividend, dividend); 1230 DeoptimizeIf(zero, instr->environment()); 1231 } 1232 // Check for (kMinInt / -1). 1233 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1234 __ cmp(dividend, kMinInt); 1235 DeoptimizeIf(zero, instr->environment()); 1236 } 1237 // Deoptimize if remainder will not be 0. 
1238 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1239 divisor != 1 && divisor != -1) { 1240 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1241 __ test(dividend, Immediate(mask)); 1242 DeoptimizeIf(not_zero, instr->environment()); 1243 } 1244 __ Move(result, dividend); 1245 int32_t shift = WhichPowerOf2Abs(divisor); 1246 if (shift > 0) { 1247 // The arithmetic shift is always OK, the 'if' is an optimization only. 1248 if (shift > 1) __ sar(result, 31); 1249 __ shr(result, 32 - shift); 1250 __ add(result, dividend); 1251 __ sar(result, shift); 1252 } 1253 if (divisor < 0) __ neg(result); 1254 } 1255 1256 1257 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1258 Register dividend = ToRegister(instr->dividend()); 1259 int32_t divisor = instr->divisor(); 1260 ASSERT(ToRegister(instr->result()).is(edx)); 1261 1262 if (divisor == 0) { 1263 DeoptimizeIf(no_condition, instr->environment()); 1264 return; 1265 } 1266 1267 // Check for (0 / -x) that will produce negative zero. 1268 HDiv* hdiv = instr->hydrogen(); 1269 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1270 __ test(dividend, dividend); 1271 DeoptimizeIf(zero, instr->environment()); 1272 } 1273 1274 __ TruncatingDiv(dividend, Abs(divisor)); 1275 if (divisor < 0) __ neg(edx); 1276 1277 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1278 __ mov(eax, edx); 1279 __ imul(eax, eax, divisor); 1280 __ sub(eax, dividend); 1281 DeoptimizeIf(not_equal, instr->environment()); 1282 } 1283 } 1284 1285 1286 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 
1287 void LCodeGen::DoDivI(LDivI* instr) { 1288 HBinaryOperation* hdiv = instr->hydrogen(); 1289 Register dividend = ToRegister(instr->dividend()); 1290 Register divisor = ToRegister(instr->divisor()); 1291 Register remainder = ToRegister(instr->temp()); 1292 ASSERT(dividend.is(eax)); 1293 ASSERT(remainder.is(edx)); 1294 ASSERT(ToRegister(instr->result()).is(eax)); 1295 ASSERT(!divisor.is(eax)); 1296 ASSERT(!divisor.is(edx)); 1297 1298 // Check for x / 0. 1299 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1300 __ test(divisor, divisor); 1301 DeoptimizeIf(zero, instr->environment()); 1302 } 1303 1304 // Check for (0 / -x) that will produce negative zero. 1305 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1306 Label dividend_not_zero; 1307 __ test(dividend, dividend); 1308 __ j(not_zero, ÷nd_not_zero, Label::kNear); 1309 __ test(divisor, divisor); 1310 DeoptimizeIf(sign, instr->environment()); 1311 __ bind(÷nd_not_zero); 1312 } 1313 1314 // Check for (kMinInt / -1). 1315 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1316 Label dividend_not_min_int; 1317 __ cmp(dividend, kMinInt); 1318 __ j(not_zero, ÷nd_not_min_int, Label::kNear); 1319 __ cmp(divisor, -1); 1320 DeoptimizeIf(zero, instr->environment()); 1321 __ bind(÷nd_not_min_int); 1322 } 1323 1324 // Sign extend to edx (= remainder). 1325 __ cdq(); 1326 __ idiv(divisor); 1327 1328 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1329 // Deoptimize if remainder is not 0. 1330 __ test(remainder, remainder); 1331 DeoptimizeIf(not_zero, instr->environment()); 1332 } 1333 } 1334 1335 1336 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1337 Register dividend = ToRegister(instr->dividend()); 1338 int32_t divisor = instr->divisor(); 1339 ASSERT(dividend.is(ToRegister(instr->result()))); 1340 1341 // If the divisor is positive, things are easy: There can be no deopts and we 1342 // can simply do an arithmetic right shift. 
1343 if (divisor == 1) return; 1344 int32_t shift = WhichPowerOf2Abs(divisor); 1345 if (divisor > 1) { 1346 __ sar(dividend, shift); 1347 return; 1348 } 1349 1350 // If the divisor is negative, we have to negate and handle edge cases. 1351 __ neg(dividend); 1352 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1353 DeoptimizeIf(zero, instr->environment()); 1354 } 1355 1356 // Dividing by -1 is basically negation, unless we overflow. 1357 if (divisor == -1) { 1358 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1359 DeoptimizeIf(overflow, instr->environment()); 1360 } 1361 return; 1362 } 1363 1364 // If the negation could not overflow, simply shifting is OK. 1365 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1366 __ sar(dividend, shift); 1367 return; 1368 } 1369 1370 Label not_kmin_int, done; 1371 __ j(no_overflow, ¬_kmin_int, Label::kNear); 1372 __ mov(dividend, Immediate(kMinInt / divisor)); 1373 __ jmp(&done, Label::kNear); 1374 __ bind(¬_kmin_int); 1375 __ sar(dividend, shift); 1376 __ bind(&done); 1377 } 1378 1379 1380 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1381 Register dividend = ToRegister(instr->dividend()); 1382 int32_t divisor = instr->divisor(); 1383 ASSERT(ToRegister(instr->result()).is(edx)); 1384 1385 if (divisor == 0) { 1386 DeoptimizeIf(no_condition, instr->environment()); 1387 return; 1388 } 1389 1390 // Check for (0 / -x) that will produce negative zero. 1391 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1392 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1393 __ test(dividend, dividend); 1394 DeoptimizeIf(zero, instr->environment()); 1395 } 1396 1397 // Easy case: We need no dynamic check for the dividend and the flooring 1398 // division is the same as the truncating division. 
1399 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1400 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1401 __ TruncatingDiv(dividend, Abs(divisor)); 1402 if (divisor < 0) __ neg(edx); 1403 return; 1404 } 1405 1406 // In the general case we may need to adjust before and after the truncating 1407 // division to get a flooring division. 1408 Register temp = ToRegister(instr->temp3()); 1409 ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx)); 1410 Label needs_adjustment, done; 1411 __ cmp(dividend, Immediate(0)); 1412 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear); 1413 __ TruncatingDiv(dividend, Abs(divisor)); 1414 if (divisor < 0) __ neg(edx); 1415 __ jmp(&done, Label::kNear); 1416 __ bind(&needs_adjustment); 1417 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1)); 1418 __ TruncatingDiv(temp, Abs(divisor)); 1419 if (divisor < 0) __ neg(edx); 1420 __ dec(edx); 1421 __ bind(&done); 1422 } 1423 1424 1425 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1426 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1427 HBinaryOperation* hdiv = instr->hydrogen(); 1428 Register dividend = ToRegister(instr->dividend()); 1429 Register divisor = ToRegister(instr->divisor()); 1430 Register remainder = ToRegister(instr->temp()); 1431 Register result = ToRegister(instr->result()); 1432 ASSERT(dividend.is(eax)); 1433 ASSERT(remainder.is(edx)); 1434 ASSERT(result.is(eax)); 1435 ASSERT(!divisor.is(eax)); 1436 ASSERT(!divisor.is(edx)); 1437 1438 // Check for x / 0. 1439 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1440 __ test(divisor, divisor); 1441 DeoptimizeIf(zero, instr->environment()); 1442 } 1443 1444 // Check for (0 / -x) that will produce negative zero. 
1445 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1446 Label dividend_not_zero; 1447 __ test(dividend, dividend); 1448 __ j(not_zero, ÷nd_not_zero, Label::kNear); 1449 __ test(divisor, divisor); 1450 DeoptimizeIf(sign, instr->environment()); 1451 __ bind(÷nd_not_zero); 1452 } 1453 1454 // Check for (kMinInt / -1). 1455 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1456 Label dividend_not_min_int; 1457 __ cmp(dividend, kMinInt); 1458 __ j(not_zero, ÷nd_not_min_int, Label::kNear); 1459 __ cmp(divisor, -1); 1460 DeoptimizeIf(zero, instr->environment()); 1461 __ bind(÷nd_not_min_int); 1462 } 1463 1464 // Sign extend to edx (= remainder). 1465 __ cdq(); 1466 __ idiv(divisor); 1467 1468 Label done; 1469 __ test(remainder, remainder); 1470 __ j(zero, &done, Label::kNear); 1471 __ xor_(remainder, divisor); 1472 __ sar(remainder, 31); 1473 __ add(result, remainder); 1474 __ bind(&done); 1475 } 1476 1477 1478 void LCodeGen::DoMulI(LMulI* instr) { 1479 Register left = ToRegister(instr->left()); 1480 LOperand* right = instr->right(); 1481 1482 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1483 __ mov(ToRegister(instr->temp()), left); 1484 } 1485 1486 if (right->IsConstantOperand()) { 1487 // Try strength reductions on the multiplication. 1488 // All replacement instructions are at most as long as the imul 1489 // and have better latency. 1490 int constant = ToInteger32(LConstantOperand::cast(right)); 1491 if (constant == -1) { 1492 __ neg(left); 1493 } else if (constant == 0) { 1494 __ xor_(left, Operand(left)); 1495 } else if (constant == 2) { 1496 __ add(left, Operand(left)); 1497 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1498 // If we know that the multiplication can't overflow, it's safe to 1499 // use instructions that don't set the overflow flag for the 1500 // multiplication. 1501 switch (constant) { 1502 case 1: 1503 // Do nothing. 
1504 break; 1505 case 3: 1506 __ lea(left, Operand(left, left, times_2, 0)); 1507 break; 1508 case 4: 1509 __ shl(left, 2); 1510 break; 1511 case 5: 1512 __ lea(left, Operand(left, left, times_4, 0)); 1513 break; 1514 case 8: 1515 __ shl(left, 3); 1516 break; 1517 case 9: 1518 __ lea(left, Operand(left, left, times_8, 0)); 1519 break; 1520 case 16: 1521 __ shl(left, 4); 1522 break; 1523 default: 1524 __ imul(left, left, constant); 1525 break; 1526 } 1527 } else { 1528 __ imul(left, left, constant); 1529 } 1530 } else { 1531 if (instr->hydrogen()->representation().IsSmi()) { 1532 __ SmiUntag(left); 1533 } 1534 __ imul(left, ToOperand(right)); 1535 } 1536 1537 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1538 DeoptimizeIf(overflow, instr->environment()); 1539 } 1540 1541 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1542 // Bail out if the result is supposed to be negative zero. 1543 Label done; 1544 __ test(left, Operand(left)); 1545 __ j(not_zero, &done, Label::kNear); 1546 if (right->IsConstantOperand()) { 1547 if (ToInteger32(LConstantOperand::cast(right)) < 0) { 1548 DeoptimizeIf(no_condition, instr->environment()); 1549 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { 1550 __ cmp(ToRegister(instr->temp()), Immediate(0)); 1551 DeoptimizeIf(less, instr->environment()); 1552 } 1553 } else { 1554 // Test the non-zero operand for negative sign. 
1555 __ or_(ToRegister(instr->temp()), ToOperand(right)); 1556 DeoptimizeIf(sign, instr->environment()); 1557 } 1558 __ bind(&done); 1559 } 1560 } 1561 1562 1563 void LCodeGen::DoBitI(LBitI* instr) { 1564 LOperand* left = instr->left(); 1565 LOperand* right = instr->right(); 1566 ASSERT(left->Equals(instr->result())); 1567 ASSERT(left->IsRegister()); 1568 1569 if (right->IsConstantOperand()) { 1570 int32_t right_operand = 1571 ToRepresentation(LConstantOperand::cast(right), 1572 instr->hydrogen()->representation()); 1573 switch (instr->op()) { 1574 case Token::BIT_AND: 1575 __ and_(ToRegister(left), right_operand); 1576 break; 1577 case Token::BIT_OR: 1578 __ or_(ToRegister(left), right_operand); 1579 break; 1580 case Token::BIT_XOR: 1581 if (right_operand == int32_t(~0)) { 1582 __ not_(ToRegister(left)); 1583 } else { 1584 __ xor_(ToRegister(left), right_operand); 1585 } 1586 break; 1587 default: 1588 UNREACHABLE(); 1589 break; 1590 } 1591 } else { 1592 switch (instr->op()) { 1593 case Token::BIT_AND: 1594 __ and_(ToRegister(left), ToOperand(right)); 1595 break; 1596 case Token::BIT_OR: 1597 __ or_(ToRegister(left), ToOperand(right)); 1598 break; 1599 case Token::BIT_XOR: 1600 __ xor_(ToRegister(left), ToOperand(right)); 1601 break; 1602 default: 1603 UNREACHABLE(); 1604 break; 1605 } 1606 } 1607 } 1608 1609 1610 void LCodeGen::DoShiftI(LShiftI* instr) { 1611 LOperand* left = instr->left(); 1612 LOperand* right = instr->right(); 1613 ASSERT(left->Equals(instr->result())); 1614 ASSERT(left->IsRegister()); 1615 if (right->IsRegister()) { 1616 ASSERT(ToRegister(right).is(ecx)); 1617 1618 switch (instr->op()) { 1619 case Token::ROR: 1620 __ ror_cl(ToRegister(left)); 1621 if (instr->can_deopt()) { 1622 __ test(ToRegister(left), ToRegister(left)); 1623 DeoptimizeIf(sign, instr->environment()); 1624 } 1625 break; 1626 case Token::SAR: 1627 __ sar_cl(ToRegister(left)); 1628 break; 1629 case Token::SHR: 1630 __ shr_cl(ToRegister(left)); 1631 if (instr->can_deopt()) { 1632 
__ test(ToRegister(left), ToRegister(left)); 1633 DeoptimizeIf(sign, instr->environment()); 1634 } 1635 break; 1636 case Token::SHL: 1637 __ shl_cl(ToRegister(left)); 1638 break; 1639 default: 1640 UNREACHABLE(); 1641 break; 1642 } 1643 } else { 1644 int value = ToInteger32(LConstantOperand::cast(right)); 1645 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1646 switch (instr->op()) { 1647 case Token::ROR: 1648 if (shift_count == 0 && instr->can_deopt()) { 1649 __ test(ToRegister(left), ToRegister(left)); 1650 DeoptimizeIf(sign, instr->environment()); 1651 } else { 1652 __ ror(ToRegister(left), shift_count); 1653 } 1654 break; 1655 case Token::SAR: 1656 if (shift_count != 0) { 1657 __ sar(ToRegister(left), shift_count); 1658 } 1659 break; 1660 case Token::SHR: 1661 if (shift_count != 0) { 1662 __ shr(ToRegister(left), shift_count); 1663 } else if (instr->can_deopt()) { 1664 __ test(ToRegister(left), ToRegister(left)); 1665 DeoptimizeIf(sign, instr->environment()); 1666 } 1667 break; 1668 case Token::SHL: 1669 if (shift_count != 0) { 1670 if (instr->hydrogen_value()->representation().IsSmi() && 1671 instr->can_deopt()) { 1672 if (shift_count != 1) { 1673 __ shl(ToRegister(left), shift_count - 1); 1674 } 1675 __ SmiTag(ToRegister(left)); 1676 DeoptimizeIf(overflow, instr->environment()); 1677 } else { 1678 __ shl(ToRegister(left), shift_count); 1679 } 1680 } 1681 break; 1682 default: 1683 UNREACHABLE(); 1684 break; 1685 } 1686 } 1687 } 1688 1689 1690 void LCodeGen::DoSubI(LSubI* instr) { 1691 LOperand* left = instr->left(); 1692 LOperand* right = instr->right(); 1693 ASSERT(left->Equals(instr->result())); 1694 1695 if (right->IsConstantOperand()) { 1696 __ sub(ToOperand(left), 1697 ToImmediate(right, instr->hydrogen()->representation())); 1698 } else { 1699 __ sub(ToRegister(left), ToOperand(right)); 1700 } 1701 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1702 DeoptimizeIf(overflow, instr->environment()); 1703 } 1704 } 1705 1706 1707 void 
LCodeGen::DoConstantI(LConstantI* instr) { 1708 __ Move(ToRegister(instr->result()), Immediate(instr->value())); 1709 } 1710 1711 1712 void LCodeGen::DoConstantS(LConstantS* instr) { 1713 __ Move(ToRegister(instr->result()), Immediate(instr->value())); 1714 } 1715 1716 1717 void LCodeGen::DoConstantD(LConstantD* instr) { 1718 double v = instr->value(); 1719 uint64_t int_val = BitCast<uint64_t, double>(v); 1720 int32_t lower = static_cast<int32_t>(int_val); 1721 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 1722 ASSERT(instr->result()->IsDoubleRegister()); 1723 1724 XMMRegister res = ToDoubleRegister(instr->result()); 1725 if (int_val == 0) { 1726 __ xorps(res, res); 1727 } else { 1728 Register temp = ToRegister(instr->temp()); 1729 if (CpuFeatures::IsSupported(SSE4_1)) { 1730 CpuFeatureScope scope2(masm(), SSE4_1); 1731 if (lower != 0) { 1732 __ Move(temp, Immediate(lower)); 1733 __ movd(res, Operand(temp)); 1734 __ Move(temp, Immediate(upper)); 1735 __ pinsrd(res, Operand(temp), 1); 1736 } else { 1737 __ xorps(res, res); 1738 __ Move(temp, Immediate(upper)); 1739 __ pinsrd(res, Operand(temp), 1); 1740 } 1741 } else { 1742 __ Move(temp, Immediate(upper)); 1743 __ movd(res, Operand(temp)); 1744 __ psllq(res, 32); 1745 if (lower != 0) { 1746 XMMRegister xmm_scratch = double_scratch0(); 1747 __ Move(temp, Immediate(lower)); 1748 __ movd(xmm_scratch, Operand(temp)); 1749 __ orps(res, xmm_scratch); 1750 } 1751 } 1752 } 1753 } 1754 1755 1756 void LCodeGen::DoConstantE(LConstantE* instr) { 1757 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); 1758 } 1759 1760 1761 void LCodeGen::DoConstantT(LConstantT* instr) { 1762 Register reg = ToRegister(instr->result()); 1763 Handle<Object> object = instr->value(isolate()); 1764 AllowDeferredHandleDereference smi_check; 1765 __ LoadObject(reg, object); 1766 } 1767 1768 1769 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { 1770 Register result = ToRegister(instr->result()); 1771 
Register map = ToRegister(instr->value()); 1772 __ EnumLength(result, map); 1773 } 1774 1775 1776 void LCodeGen::DoDateField(LDateField* instr) { 1777 Register object = ToRegister(instr->date()); 1778 Register result = ToRegister(instr->result()); 1779 Register scratch = ToRegister(instr->temp()); 1780 Smi* index = instr->index(); 1781 Label runtime, done; 1782 ASSERT(object.is(result)); 1783 ASSERT(object.is(eax)); 1784 1785 __ test(object, Immediate(kSmiTagMask)); 1786 DeoptimizeIf(zero, instr->environment()); 1787 __ CmpObjectType(object, JS_DATE_TYPE, scratch); 1788 DeoptimizeIf(not_equal, instr->environment()); 1789 1790 if (index->value() == 0) { 1791 __ mov(result, FieldOperand(object, JSDate::kValueOffset)); 1792 } else { 1793 if (index->value() < JSDate::kFirstUncachedField) { 1794 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1795 __ mov(scratch, Operand::StaticVariable(stamp)); 1796 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); 1797 __ j(not_equal, &runtime, Label::kNear); 1798 __ mov(result, FieldOperand(object, JSDate::kValueOffset + 1799 kPointerSize * index->value())); 1800 __ jmp(&done, Label::kNear); 1801 } 1802 __ bind(&runtime); 1803 __ PrepareCallCFunction(2, scratch); 1804 __ mov(Operand(esp, 0), object); 1805 __ mov(Operand(esp, 1 * kPointerSize), Immediate(index)); 1806 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1807 __ bind(&done); 1808 } 1809 } 1810 1811 1812 Operand LCodeGen::BuildSeqStringOperand(Register string, 1813 LOperand* index, 1814 String::Encoding encoding) { 1815 if (index->IsConstantOperand()) { 1816 int offset = ToRepresentation(LConstantOperand::cast(index), 1817 Representation::Integer32()); 1818 if (encoding == String::TWO_BYTE_ENCODING) { 1819 offset *= kUC16Size; 1820 } 1821 STATIC_ASSERT(kCharSize == 1); 1822 return FieldOperand(string, SeqString::kHeaderSize + offset); 1823 } 1824 return FieldOperand( 1825 string, ToRegister(index), 1826 
encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, 1827 SeqString::kHeaderSize); 1828 } 1829 1830 1831 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1832 String::Encoding encoding = instr->hydrogen()->encoding(); 1833 Register result = ToRegister(instr->result()); 1834 Register string = ToRegister(instr->string()); 1835 1836 if (FLAG_debug_code) { 1837 __ push(string); 1838 __ mov(string, FieldOperand(string, HeapObject::kMapOffset)); 1839 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset)); 1840 1841 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); 1842 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1843 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1844 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING 1845 ? one_byte_seq_type : two_byte_seq_type)); 1846 __ Check(equal, kUnexpectedStringType); 1847 __ pop(string); 1848 } 1849 1850 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1851 if (encoding == String::ONE_BYTE_ENCODING) { 1852 __ movzx_b(result, operand); 1853 } else { 1854 __ movzx_w(result, operand); 1855 } 1856 } 1857 1858 1859 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1860 String::Encoding encoding = instr->hydrogen()->encoding(); 1861 Register string = ToRegister(instr->string()); 1862 1863 if (FLAG_debug_code) { 1864 Register value = ToRegister(instr->value()); 1865 Register index = ToRegister(instr->index()); 1866 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1867 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1868 int encoding_mask = 1869 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 1870 ? 
one_byte_seq_type : two_byte_seq_type; 1871 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 1872 } 1873 1874 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1875 if (instr->value()->IsConstantOperand()) { 1876 int value = ToRepresentation(LConstantOperand::cast(instr->value()), 1877 Representation::Integer32()); 1878 ASSERT_LE(0, value); 1879 if (encoding == String::ONE_BYTE_ENCODING) { 1880 ASSERT_LE(value, String::kMaxOneByteCharCode); 1881 __ mov_b(operand, static_cast<int8_t>(value)); 1882 } else { 1883 ASSERT_LE(value, String::kMaxUtf16CodeUnit); 1884 __ mov_w(operand, static_cast<int16_t>(value)); 1885 } 1886 } else { 1887 Register value = ToRegister(instr->value()); 1888 if (encoding == String::ONE_BYTE_ENCODING) { 1889 __ mov_b(operand, value); 1890 } else { 1891 __ mov_w(operand, value); 1892 } 1893 } 1894 } 1895 1896 1897 void LCodeGen::DoAddI(LAddI* instr) { 1898 LOperand* left = instr->left(); 1899 LOperand* right = instr->right(); 1900 1901 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { 1902 if (right->IsConstantOperand()) { 1903 int32_t offset = ToRepresentation(LConstantOperand::cast(right), 1904 instr->hydrogen()->representation()); 1905 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); 1906 } else { 1907 Operand address(ToRegister(left), ToRegister(right), times_1, 0); 1908 __ lea(ToRegister(instr->result()), address); 1909 } 1910 } else { 1911 if (right->IsConstantOperand()) { 1912 __ add(ToOperand(left), 1913 ToImmediate(right, instr->hydrogen()->representation())); 1914 } else { 1915 __ add(ToRegister(left), ToOperand(right)); 1916 } 1917 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1918 DeoptimizeIf(overflow, instr->environment()); 1919 } 1920 } 1921 } 1922 1923 1924 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1925 LOperand* left = instr->left(); 1926 LOperand* right = instr->right(); 1927 ASSERT(left->Equals(instr->result())); 
1928 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1929 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1930 Label return_left; 1931 Condition condition = (operation == HMathMinMax::kMathMin) 1932 ? less_equal 1933 : greater_equal; 1934 if (right->IsConstantOperand()) { 1935 Operand left_op = ToOperand(left); 1936 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), 1937 instr->hydrogen()->representation()); 1938 __ cmp(left_op, immediate); 1939 __ j(condition, &return_left, Label::kNear); 1940 __ mov(left_op, immediate); 1941 } else { 1942 Register left_reg = ToRegister(left); 1943 Operand right_op = ToOperand(right); 1944 __ cmp(left_reg, right_op); 1945 __ j(condition, &return_left, Label::kNear); 1946 __ mov(left_reg, right_op); 1947 } 1948 __ bind(&return_left); 1949 } else { 1950 ASSERT(instr->hydrogen()->representation().IsDouble()); 1951 Label check_nan_left, check_zero, return_left, return_right; 1952 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; 1953 XMMRegister left_reg = ToDoubleRegister(left); 1954 XMMRegister right_reg = ToDoubleRegister(right); 1955 __ ucomisd(left_reg, right_reg); 1956 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. 1957 __ j(equal, &check_zero, Label::kNear); // left == right. 1958 __ j(condition, &return_left, Label::kNear); 1959 __ jmp(&return_right, Label::kNear); 1960 1961 __ bind(&check_zero); 1962 XMMRegister xmm_scratch = double_scratch0(); 1963 __ xorps(xmm_scratch, xmm_scratch); 1964 __ ucomisd(left_reg, xmm_scratch); 1965 __ j(not_equal, &return_left, Label::kNear); // left == right != 0. 1966 // At this point, both left and right are either 0 or -0. 1967 if (operation == HMathMinMax::kMathMin) { 1968 __ orpd(left_reg, right_reg); 1969 } else { 1970 // Since we operate on +0 and/or -0, addsd and andsd have the same effect. 
      // Since both operands are +/-0 here, addsd (+0 + -0 == +0) picks the
      // max, matching JS Math.max semantics for signed zeros.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
  }
}


// Emits SSE2 double arithmetic for instr->op().  The non-MOD cases compute
// in place into the left XMM register; MOD calls out to the C runtime and
// moves the x87 st(0) result into the result XMM register via the stack.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a mulsd depending on the result
      __ movaps(left, left);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack (4 pointer-sized slots).
      __ PrepareCallCFunction(4, eax);
      __ movsd(Operand(esp, 0 * kDoubleSize), left);
      __ movsd(Operand(esp, 1 * kDoubleSize), right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          4);

      // Return value is in st(0) on ia32.
      // Store it into the result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movsd(result, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


// Tagged (generic) binary operation: delegates to the BinaryOpIC stub.
// The register constraints (esi/edx/eax) are fixed by the stub's calling
// convention, as asserted below.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


// Emits a conditional branch to instr's true/false destinations.  When one
// destination is the next emitted block, only a single (possibly negated)
// jump is emitted and the other edge falls through.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


// Emits a jump to instr's false destination when cc holds (unconditionally
// for no_condition); the true edge always falls through.
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(false_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
  }
}


// Emits the ToBoolean branch for instr->value(), specializing on the value's
// representation and (for tagged values) its statically known type.
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    // Smi/int32: truthy iff non-zero.
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
2079 ASSERT(!info()->IsStub()); 2080 XMMRegister reg = ToDoubleRegister(instr->value()); 2081 XMMRegister xmm_scratch = double_scratch0(); 2082 __ xorps(xmm_scratch, xmm_scratch); 2083 __ ucomisd(reg, xmm_scratch); 2084 EmitBranch(instr, not_equal); 2085 } else { 2086 ASSERT(r.IsTagged()); 2087 Register reg = ToRegister(instr->value()); 2088 HType type = instr->hydrogen()->value()->type(); 2089 if (type.IsBoolean()) { 2090 ASSERT(!info()->IsStub()); 2091 __ cmp(reg, factory()->true_value()); 2092 EmitBranch(instr, equal); 2093 } else if (type.IsSmi()) { 2094 ASSERT(!info()->IsStub()); 2095 __ test(reg, Operand(reg)); 2096 EmitBranch(instr, not_equal); 2097 } else if (type.IsJSArray()) { 2098 ASSERT(!info()->IsStub()); 2099 EmitBranch(instr, no_condition); 2100 } else if (type.IsHeapNumber()) { 2101 ASSERT(!info()->IsStub()); 2102 XMMRegister xmm_scratch = double_scratch0(); 2103 __ xorps(xmm_scratch, xmm_scratch); 2104 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); 2105 EmitBranch(instr, not_equal); 2106 } else if (type.IsString()) { 2107 ASSERT(!info()->IsStub()); 2108 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); 2109 EmitBranch(instr, not_equal); 2110 } else { 2111 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 2112 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 2113 2114 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 2115 // undefined -> false. 2116 __ cmp(reg, factory()->undefined_value()); 2117 __ j(equal, instr->FalseLabel(chunk_)); 2118 } 2119 if (expected.Contains(ToBooleanStub::BOOLEAN)) { 2120 // true -> true. 2121 __ cmp(reg, factory()->true_value()); 2122 __ j(equal, instr->TrueLabel(chunk_)); 2123 // false -> false. 2124 __ cmp(reg, factory()->false_value()); 2125 __ j(equal, instr->FalseLabel(chunk_)); 2126 } 2127 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 2128 // 'null' -> false. 
2129 __ cmp(reg, factory()->null_value()); 2130 __ j(equal, instr->FalseLabel(chunk_)); 2131 } 2132 2133 if (expected.Contains(ToBooleanStub::SMI)) { 2134 // Smis: 0 -> false, all other -> true. 2135 __ test(reg, Operand(reg)); 2136 __ j(equal, instr->FalseLabel(chunk_)); 2137 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2138 } else if (expected.NeedsMap()) { 2139 // If we need a map later and have a Smi -> deopt. 2140 __ test(reg, Immediate(kSmiTagMask)); 2141 DeoptimizeIf(zero, instr->environment()); 2142 } 2143 2144 Register map = no_reg; // Keep the compiler happy. 2145 if (expected.NeedsMap()) { 2146 map = ToRegister(instr->temp()); 2147 ASSERT(!map.is(reg)); 2148 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); 2149 2150 if (expected.CanBeUndetectable()) { 2151 // Undetectable -> false. 2152 __ test_b(FieldOperand(map, Map::kBitFieldOffset), 2153 1 << Map::kIsUndetectable); 2154 __ j(not_zero, instr->FalseLabel(chunk_)); 2155 } 2156 } 2157 2158 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { 2159 // spec object -> true. 2160 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); 2161 __ j(above_equal, instr->TrueLabel(chunk_)); 2162 } 2163 2164 if (expected.Contains(ToBooleanStub::STRING)) { 2165 // String value -> false iff empty. 2166 Label not_string; 2167 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); 2168 __ j(above_equal, ¬_string, Label::kNear); 2169 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); 2170 __ j(not_zero, instr->TrueLabel(chunk_)); 2171 __ jmp(instr->FalseLabel(chunk_)); 2172 __ bind(¬_string); 2173 } 2174 2175 if (expected.Contains(ToBooleanStub::SYMBOL)) { 2176 // Symbol value -> true. 2177 __ CmpInstanceType(map, SYMBOL_TYPE); 2178 __ j(equal, instr->TrueLabel(chunk_)); 2179 } 2180 2181 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 2182 // heap number -> false iff +0, -0, or NaN. 
2183 Label not_heap_number; 2184 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), 2185 factory()->heap_number_map()); 2186 __ j(not_equal, ¬_heap_number, Label::kNear); 2187 XMMRegister xmm_scratch = double_scratch0(); 2188 __ xorps(xmm_scratch, xmm_scratch); 2189 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); 2190 __ j(zero, instr->FalseLabel(chunk_)); 2191 __ jmp(instr->TrueLabel(chunk_)); 2192 __ bind(¬_heap_number); 2193 } 2194 2195 if (!expected.IsGeneric()) { 2196 // We've seen something for the first time -> deopt. 2197 // This can only happen if we are not generic already. 2198 DeoptimizeIf(no_condition, instr->environment()); 2199 } 2200 } 2201 } 2202 } 2203 2204 2205 void LCodeGen::EmitGoto(int block) { 2206 if (!IsNextEmittedBlock(block)) { 2207 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2208 } 2209 } 2210 2211 2212 void LCodeGen::DoGoto(LGoto* instr) { 2213 EmitGoto(instr->block_id()); 2214 } 2215 2216 2217 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2218 Condition cond = no_condition; 2219 switch (op) { 2220 case Token::EQ: 2221 case Token::EQ_STRICT: 2222 cond = equal; 2223 break; 2224 case Token::NE: 2225 case Token::NE_STRICT: 2226 cond = not_equal; 2227 break; 2228 case Token::LT: 2229 cond = is_unsigned ? below : less; 2230 break; 2231 case Token::GT: 2232 cond = is_unsigned ? above : greater; 2233 break; 2234 case Token::LTE: 2235 cond = is_unsigned ? below_equal : less_equal; 2236 break; 2237 case Token::GTE: 2238 cond = is_unsigned ? 
          above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


// Emits a numeric comparison branch.  Constant-vs-constant comparisons are
// folded at compile time into an unconditional goto.
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  // Doubles use unsigned condition codes because ucomisd sets CF/ZF
  // (unsigned-style flags) rather than SF/OF.
  bool is_unsigned =
      instr->is_double() ||
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cc = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToOperand(left),
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
        // cmp requires the immediate on the right, so swap operands.
        __ cmp(ToOperand(right),
               ToImmediate(left, instr->hydrogen()->representation()));
        // We commuted the operands, so commute the condition.
        cc = CommuteCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(instr, cc);
  }
}


// Branches on reference equality of two tagged values.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  }
  EmitBranch(instr, equal);
}


// Branches iff the input is the hole value.  For doubles this means the
// specific hole NaN bit pattern, not just any NaN.
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  // Any non-NaN value cannot be the hole NaN: compare ordered -> false.
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  // Spill the double and compare its upper word against the hole pattern.
  __ sub(esp, Immediate(kDoubleSize));
  __ movsd(MemOperand(esp, 0), input_reg);

  // NOTE(review): esp is restored first and the upper word is then read from
  // below the stack pointer; appears intentional but relies on nothing
  // clobbering that slot in between.
  __ add(esp, Immediate(kDoubleSize));
  int offset = sizeof(kHoleNanUpper32);
  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}


// Branches iff the value is -0.0 (minus zero).
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    XMMRegister value = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, value);
    // Not numerically zero -> cannot be -0.
    EmitFalseBranch(instr, not_equal);
    // movmskpd extracts the sign bits; bit 0 set means negative zero.
    __ movmskpd(scratch, value);
    __ test(scratch, Immediate(1));
    EmitBranch(instr, not_zero);
  } else {
    Register value = ToRegister(instr->value());
    // Tagged case: must be a heap number whose bit pattern is exactly
    // 0x80000000 00000000 (the IEEE-754 encoding of -0.0).
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    // cmp exp, 1 sets OF only when the word is 0x80000000 (INT_MIN - 1
    // overflows), so no_overflow rejects every other upper word.
    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
           Immediate(0x1));
    EmitFalseBranch(instr, no_overflow);
    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
           Immediate(0x00000000));
    EmitBranch(instr, equal);
  }
}


// Emits the class test for the JS "is object" predicate: null and
// non-undetectable non-callable spec objects are objects.  Returns the
// condition to branch on; jumps directly for the early-out cases.
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  // null counts as an object here.
  __ cmp(input, isolate()->factory()->null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, is_not_object);

  // Instance type must lie in the non-callable spec-object range.
  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  Condition true_cond = EmitIsObject(
      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


// Emits the string test; returns the condition that holds when input is a
// string.  The Smi check may be omitted when the caller knows the value is
// a heap object.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  // Skip the Smi check when hydrogen already proved a heap object.
  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


// Branches iff the value is a Smi (tag bit clear).
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}


// Branches iff the value is an undetectable heap object (map bit set).
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(instr, not_zero);
}


// Maps a comparison token to the signed condition code used after a
// CompareIC call (result compared against zero).
static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


// String comparison via the CompareIC; the IC leaves a value in eax whose
// sign/zero relation to 0 encodes the comparison outcome.
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  __ test(eax, Operand(eax));

  EmitBranch(instr, condition);
}


// Picks the single instance type to compare against for a
// HasInstanceTypeAndBranch range check (see BranchCondition below).
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


// Condition matching TestType: exact match, or one-sided range check when
// the range is open toward FIRST_TYPE or LAST_TYPE.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


// Extracts the array index cached in a string's hash field.
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


// Branches iff the string's hash field contains a cached array index
// (the "contains" mask bits are clear in that case, hence 'equal').
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  // EmitClassOfTest leaves the answer in the z flag (see comment above).
  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


// Branches iff the object's map is exactly instr->map().
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


// Generic instanceof via the InstanceofStub; materializes true/false into
// the result register from the stub's zero/non-zero answer in eax.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  // Object and function are in fixed registers defined by the stub.
  ASSERT(ToRegister(instr->context()).is(esi));
  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  // Stub returns 0 in eax for "is an instance".
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}


// instanceof against a known global function, with an inlined map/result
// cache that the InstanceofStub patches in place; the slow path runs as
// deferred code (DoDeferredInstanceOfKnownGlobal).
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result, Label::kNear);

  // This is the inlined call site instanceof cache. The two occurences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = ToRegister(instr->temp());
  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
  __ j(not_equal, &cache_miss, Label::kNear);
  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
  __ jmp(&done, Label::kNear);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ cmp(object, factory()->null_value());
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
  __ j(is_string, &false_result, Label::kNear);

  // Go to the deferred code.
  __ jmp(deferred->entry());

  __ bind(&false_result);
  __ mov(ToRegister(instr->result()), factory()->false_value());

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


// Slow path for DoInstanceOfKnownGlobal: calls the InstanceofStub with the
// inline-check flags, passing the offset back to the patched map check via
// the top safepoint register slot.
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  PushSafepointRegistersScope scope(this);

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(isolate(), flags);

  // Get the temp register reserved by the instruction. This needs to be a
  // register which is pushed last by PushSafepointRegisters as top of the
  // stack is used to pass the offset to the location of the map check to
  // the stub.
  Register temp = ToRegister(instr->temp());
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  // kAdditionalDelta accounts for the code emitted between here and the
  // actual stub call; it must match the instruction sequence exactly.
  static const int kAdditionalDelta = 13;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Get the deoptimization index of the LLazyBailout-environment that
  // corresponds to this instruction.
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
}


// Generic comparison via the CompareIC, materializing true/false into the
// result register from the IC's flag-encoded answer in eax.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}


// Emits the actual return sequence, popping the arguments (and the optional
// alignment padding word when dynamic_frame_alignment is set).
void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
  // Extra words above the parameters: return address, plus the alignment
  // padding word when dynamically aligned.
  int extra_value_count = dynamic_frame_alignment ?
      2 : 1;

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      // Verify the alignment padding word really is the zap marker.
      __ cmp(Operand(esp,
                     (parameter_count + extra_value_count) * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi
    __ SmiUntag(reg);
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      ASSERT(extra_value_count == 2);
      __ cmp(Operand(esp, reg, times_pointer_size,
                     extra_value_count * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }

    // emit code to restore stack based on instr->parameter_count()
    __ pop(return_addr_reg);  // save return address
    if (dynamic_frame_alignment) {
      __ inc(reg);  // 1 more for alignment
    }
    __ shl(reg, kPointerSizeLog2);
    __ add(esp, reg);
    __ jmp(return_addr_reg);
  }
}


// Tears down the frame and returns, emitting both the aligned and unaligned
// return sequences when dynamic frame alignment is in use.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
  if (dynamic_frame_alignment_) {
    // Fetch the state of the dynamic frame alignment.
    __ mov(edx, Operand(ebp,
        JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(esp, ebp);
    __ pop(ebp);
    no_frame_start = masm_->pc_offset();
  }
  if (dynamic_frame_alignment_) {
    // Padding present: take the padded return sequence, else fall through.
    Label no_padding;
    __ cmp(edx, Immediate(kNoAlignmentPadding));
    __ j(equal, &no_padding, Label::kNear);

    EmitReturn(instr, true);
    __ bind(&no_padding);
  }

  EmitReturn(instr, false);
  if (no_frame_start != -1) {
    info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


// Loads a global property cell's value, deoptimizing on the hole when the
// cell may have been deleted.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}


// Generic global load through the LoadIC; register constraints are fixed
// by the IC's calling convention.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Stores into a global property cell.
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }

  // Store the value.
  __ mov(Operand::ForCell(cell_handle), value);
  // Cells are always rescanned, so no write barrier here.
}


// Loads a context slot; the hole either deoptimizes or is converted to
// undefined, depending on the hydrogen instruction.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}


// Stores into a context slot, with optional hole check and write barrier.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      // Hole but no deopt: leave the slot untouched.
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Skip the Smi check in the barrier when the value is known to be a
    // heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              temp,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


// Loads a named field: external memory, in-object double, in-object
// tagged, or out-of-object (through the properties backing store).
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(ToExternalReference(
                LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    __ Load(result, operand, access.representation());
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    // Out-of-object property: indirect through the properties array.
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  __ Load(result, FieldOperand(object, offset), access.representation());
}


// Pushes a tagged operand (constant, register or stack slot) onto the
// stack for a call.
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    AllowDeferredHandleDereference smi_check;
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void
LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->result()).is(eax));

  // The load IC expects the property name in ecx.
  __ mov(ecx, instr->name());
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Loads the prototype of a JSFunction, following the
// prototype-or-initial-map field, and handling functions whose map is
// marked with a non-instance prototype.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  // Note: CmpObjectType leaves the map of |function| in |result|.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
            1 << Map::kHasNonInstancePrototype);
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


// Loads a root-list constant into the result register.
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


// Loads an argument from the (materialized) arguments area. Both index and
// length may be constants, in which case the slot is addressed directly;
// otherwise it is computed as arguments[length - index] at runtime.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    // +1 skips one of the two extra words between the frame pointer and
    // the last argument (see the non-constant case below).
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them; add one more.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}


// Loads an element from a typed/external array. Float32 loads are widened
// to double; uint32 loads deoptimize on values that do not fit in a signed
// int32 unless the result is flagged as uint32.
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    // Untag the key in place when the addressing mode cannot absorb the
    // smi tag via the scale factor.
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(result, operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // A set sign bit means the value does not fit in a signed int32.
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


// Loads an element from a FixedDoubleArray, deoptimizing on the hole NaN
// when a hole check is required.
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // The hole is encoded as a special NaN; it suffices to compare the
    // upper 32 bits of the double against kHoleNanUpper32.
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + sizeof(kHoleNanLower32));
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
  XMMRegister result = ToDoubleRegister(instr->result());
  __ movsd(result, double_load_operand);
}


// Loads an element from a FixedArray of tagged values.
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(),
                               instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS,
                               instr->base_offset()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      // For smi arrays a non-smi result can only be the hole.
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr->environment());
    }
  }
}


// Dispatches a keyed load to the typed-array, double-array, or tagged
// fixed-array implementation.
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


// Builds the addressing-mode operand for a fast array element access:
// either [elements + constant_key << shift + base_offset] or
// [elements + key * scale + base_offset].
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t base_offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      // Reject keys whose shifted value could overflow the displacement.
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value) << shift_size)
                       + base_offset);
  } else {
    // Take the tag bit into account while computing the shift size.
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      // A smi key is already shifted left by kSmiTagSize; fold that into
      // the scale factor instead of untagging the key.
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   base_offset);
  }
}


// Generic keyed load through the KeyedLoadIC; fixed register contract:
// receiver in edx, key in ecx, context in esi.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Computes the base pointer for accessing the actual arguments: the
// current frame, or the arguments adaptor frame below it if one exists.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // Inlined frames: the arguments were pushed just below esp.
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}


// Computes the number of actual arguments: the static parameter count when
// no adaptor frame is present, otherwise the adaptor frame's length field.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // |elem| equal to ebp means DoArgumentsElements found no adaptor frame.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


// Implements the receiver wrapping of Function.prototype.apply/call:
// replaces null/undefined with the global receiver for normal sloppy-mode
// functions, and deoptimizes on non-object receivers.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label receiver_ok, global_object;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ mov(scratch,
           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
              1 << SharedFunctionInfo::kNativeBitWithinByte);
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr->environment());

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  // Fetch the global receiver via the function's context chain.
  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
  const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
  __ mov(receiver, Operand(receiver, global_offset));
  const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
  __ mov(receiver, FieldOperand(receiver, receiver_offset));
  __ bind(&receiver_ok);
}


// Implements Function.prototype.apply: pushes up to kArgumentsLimit
// arguments from the arguments object onto the stack and invokes the
// function with a lazy-deopt safepoint.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(eax));  // Used for parameter count.
  ASSERT(function.is(edi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  // Push arguments from last to first; length doubles as the loop counter.
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


// Emits a debugger breakpoint instruction.
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


// Pushes a single tagged argument onto the stack.
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


// Drops instr->count() values from the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


// Loads the current function from the standard frame slot.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}


// Loads the current context: from the frame when optimizing, otherwise it
// is already expected in esi.
void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    ASSERT(result.is(esi));
  }
}


// Declares global variables/functions by calling the DeclareGlobals
// runtime function with (context, pairs, flags).
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}


// Calls a statically known JSFunction. When the formal parameter count
// matches the arity (or the function opts out of adaptation) the code
// entry is called directly; otherwise the arguments adaptor path is used.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 EDIState edi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  if (can_invoke_directly) {
    if (edi_state == EDI_UNINITIALIZED) {
      __ LoadHeapObject(edi, function);
    }

    // Change context.
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

    // Set eax to arguments count if adaption is not needed. Assumes that eax
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(eax, arity);
    }

    // Invoke function directly.
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
    }
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


// Calls code described by a call descriptor: either a constant code object
// or a code object held in a register (entered past its header).
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    __ call(code, RelocInfo::CODE_TARGET);
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(Operand(target)));
    // Skip the code object header to reach the first instruction.
    __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ call(target);
  }
  // Record the safepoint for the call above.
  generator.AfterCall();
}


// Calls a JSFunction held in edi, switching to its context first and using
// a self-call when the callee is the function being compiled.
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(eax, instr->arity());
  }

  // Change context.
  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


// Deferred code for Math.abs on a tagged value: deoptimizes unless the
// input is a heap number, then allocates a fresh heap number holding the
// input with its sign bit cleared.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  // Pick scratch registers distinct from input_reg (and from each other).
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  // Copy the number into the new heap number with the sign bit cleared.
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


// Integer Math.abs: negates negative inputs in place; deoptimizes when
// negation overflows (i.e. for kMinInt).
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


// Math.abs dispatch: bit-clears the sign for doubles, negates integers,
// and for tagged values handles smis inline with a deferred path for heap
// numbers.
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    // abs(x) = x & ~sign_bit, computed as x AND (0.0 - x) has the wrong
    // semantics; here scratch = -x and andps keeps the common (cleared
    // sign) bits, which clears the sign of x.
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


// Math.floor to int32. Uses ROUNDSD when SSE4.1 is available; otherwise
// truncates and compensates. Deoptimizes on NaN, on overflow, and on
// negative zero when the instruction bails out on -0.
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      // movmskpd extracts the sign bit; set for -0.0.
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x1);
    DeoptimizeIf(overflow, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      // Input compared equal to zero: distinguish +0 from -0 via the sign
      // bit and deoptimize on -0.
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Move(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x1);
    DeoptimizeIf(overflow, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ Cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    // Truncation rounded toward zero; floor of a non-integral negative
    // value is one less.
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}


// Math.round to int32: floor(x + 0.5) for x >= 0.5, compensated
// truncation for -0.5 < x < 0.5 boundaries, and 0 for the remaining
// range (deoptimizing on -0 when required).
void LCodeGen::DoMathRound(LMathRound* instr) {
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  Label done, round_to_zero, below_one_half, do_not_compensate;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, Operand(xmm_scratch));
  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x1);
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(overflow, instr->environment());
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
  __ subsd(input_temp, xmm_scratch);
  __ cvttsd2si(output_reg, Operand(input_temp));
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmp(output_reg, 0x1);
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(overflow, instr->environment());

  __ Cvtsi2sd(xmm_scratch, output_reg);
  __ ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done, dist);
  __ sub(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    __ RecordComment("Minus zero");
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Move(output_reg, Immediate(0));
  __ bind(&done);
}


// Math.sqrt via the SQRTSD instruction.
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  Operand input = ToOperand(instr->value());
  XMMRegister output = ToDoubleRegister(instr->result());
  __ sqrtsd(output, input);
}


// Math.pow(x, 0.5): like sqrt, except pow(-Infinity, 0.5) is +Infinity
// while sqrt(-Infinity) would be NaN; -0 is normalized to +0 first.
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, single-precision
  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
  __ mov(scratch, 0xFF800000);
  __ movd(xmm_scratch, scratch);
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}


// Math.pow via MathPowStub, selecting the stub variant from the
// exponent's representation. Register contract: base in xmm2, double
// exponent in xmm1 or tagged/int exponent in eax, result in xmm3.
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(eax));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    // A tagged exponent must be a smi or a heap number; otherwise deopt.
    Label no_deopt;
    __ JumpIfSmi(eax, &no_deopt);
    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


// Math.log in place on the input register: NaN for negative inputs,
// -Infinity for +/-0, otherwise computed with x87 FYL2X (ln2 * log2(x)),
// shuttling the value through the stack between SSE and x87.
void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  // Not above zero and not unordered-as-zero: input is negative (or NaN).
  __ j(not_carry, &zero, Label::kNear);
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  __ movsd(input_reg, Operand::StaticVariable(nan));
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  __ movsd(input_reg, Operand::StaticVariable(ninf));
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ movsd(Operand(esp, 0), input_reg);
  __ fld_d(Operand(esp, 0));
  __ fyl2x();
  __ fstp_d(Operand(esp, 0));
  __ movsd(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ bind(&done);
}


// Math.clz32 via BSR: bsr gives the index of the highest set bit, and
// 31 ^ index == 31 - index converts that to a leading-zero count; zero
// input is special-cased to produce 32.
void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label not_zero_input;
  __ bsr(result, input);

  __ j(not_zero, &not_zero_input);
  __ Move(result, Immediate(63));  // 63^31 == 32

  __ bind(&not_zero_input);
  __ xor_(result, Immediate(31));  // for x in [0..31], 31^x == 31-x.
}


// Math.exp via the shared MathExpGenerator.
void LCodeGen::DoMathExp(LMathExp* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister temp0 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}


// Invokes the function in edi: through the generic InvokeFunction path
// when the callee is unknown, otherwise through CallKnownFunction.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      EDI_CONTAINS_TARGET);
  }
}


// Calls the (possibly non-function) value in edi through the generic
// CallFunctionStub; the stub handles type checks and receiver patching.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


// Emits a generic 'new' call: constructor in edi, arity in eax, and
// undefined in ebx to signal that no type-feedback cell is available.
void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  // No cell in ebx for construct type feedback in optimized code
  __ mov(ebx, isolate()->factory()->undefined_value());
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  __ Move(eax, Immediate(instr->arity()));
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


// Calls the Array constructor, choosing a specialized stub by arity
// (0 / 1 / N). For a single argument on a fast-packed site, a zero
// length keeps the packed kind while a non-zero length is assumed to
// create holes, so the holey-kind stub is used instead.
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ Move(eax, Immediate(instr->arity()));
  __ mov(ebx, isolate()->factory()->undefined_value());
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here
      // look at the first argument
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


// Calls into the runtime system with the instruction's function id and
// arity; the context is expected in esi.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


// Stores a code object's entry address (past the Code header) into a
// JSFunction's code-entry field. No write barrier: the stored value is
// an interior address, not a tagged pointer.
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


// Computes base + offset with a single lea, for an object allocated
// inside another ("inner") allocation.
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset =
LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}


// Stores a value into a named field. Handles, in order:
//   - external (off-heap) memory stores,
//   - unboxed double fields (in-object only, no barrier needed),
//   - an optional map transition (with its own map write barrier),
//   - the store itself, either in-object or into the out-of-object
//     properties backing store, followed by the field write barrier
//     when the stored value may be a heap pointer.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->hydrogen()->field_representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);

  ASSERT(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->has_transition());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    // Out-of-object field: redirect the store through the properties
    // backing-store array.
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


// Generic named store through the StoreIC: object in edx, value in
// eax, property name loaded into ecx.
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Emits an index < length bounds check. When one side is a constant the
// comparison operands are swapped, so the condition is commuted to
// match. If the hydrogen instruction says the check can be skipped, a
// debug build plants an int3 trap instead of a deopt.
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
  if (instr->index()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->length()),
           ToImmediate(LConstantOperand::cast(instr->index()),
                       instr->hydrogen()->length()->representation()));
    cc = CommuteCondition(cc);
  } else if (instr->length()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->index()),
           ToImmediate(LConstantOperand::cast(instr->length()),
                       instr->hydrogen()->index()->representation()));
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr->environment());
  }
}


// Stores into an external/typed array, converting the value to the
// element width required by the elements kind. The key is untagged in
// place when the addressing mode needs an untagged index.
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    // Float32 store: narrow the double value through a scratch XMM.
    XMMRegister xmm_scratch = double_scratch0();
    __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
    __ movss(operand, xmm_scratch);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    // Integer kinds: pick the store width from the elements kind.
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


// Stores a double into a FixedDoubleArray. NaNs are canonicalized first
// when required, so that hole NaN bit patterns never reach the array.
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  XMMRegister value = ToDoubleRegister(instr->value());

  if (instr->NeedsCanonicalization()) {
    Label have_value;

    // ucomisd of a value against itself is unordered only for NaN.
    __ ucomisd(value, value);
    __ j(parity_odd, &have_value, Label::kNear);  // NaN.

    __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
    __ bind(&have_value);
  }

  __ movsd(double_store_operand, value);
}


// Stores a tagged value into a FixedArray element and, when the value
// may be a heap pointer, records it with the write barrier. The key
// register is reused to hold the element address for RecordWrite.
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      instr->base_offset());
  if (instr->value()->IsRegister()) {
    __ mov(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsSmi(operand_value)) {
      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
      __ mov(operand, immediate);
    } else {
      ASSERT(!IsInteger32(operand_value));
      Handle<Object> handle_value = ToHandle(operand_value);
      __ mov(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    ASSERT(!instr->key()->IsConstantOperand());
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


// Dispatches a keyed store to the typed-array, double-array, or tagged
// fixed-array implementation above.
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases...external, fast-double, fast
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


// Generic keyed store through the KeyedStoreIC: object in edx, key in
// ecx, value in eax; the IC is chosen by language mode.
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->value()).is(eax));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


// Deoptimizes if the object is a JSArray followed by an AllocationMemento,
// since a memento means allocation-site tracking is still live for it.
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr->environment());
  __ bind(&no_memento_found);
}


// Transitions an object's elements kind from from_map to to_map. A
// "simple" transition only rewrites the map word (plus map write
// barrier); anything else calls the TransitionElementsKindStub with a
// register-saving safepoint.
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    ASSERT_NE(instr->temp(), NULL);
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(esi));
    ASSERT(object_reg.is(eax));
    PushSafepointRegistersScope scope(this);
    __ mov(ebx, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr,
        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }
  __ bind(&not_applicable);
}


// Loads the char code at a string index. The fast paths are emitted by
// StringCharLoadGenerator; anything it cannot handle falls through to
// the deferred runtime call below.
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


// Deferred slow path for DoStringCharCodeAt: calls the StringCharCodeAt
// runtime function under saved safepoint registers and untags the smi
// result into the result register's safepoint slot.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


// Converts a char code to a one-character string. One-byte codes are
// looked up in the single-character string cache; a cache miss or a
// code above the one-byte range goes to the deferred runtime call.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Move(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  // An undefined cache entry means the string is not cached: slow path.
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


// Deferred slow path for DoStringCharFromCode: calls the CharFromCode
// runtime function with the smi-tagged code.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


// String concatenation via StringAddStub: left in edx, right in eax.
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


// int32 -> double conversion with a single cvtsi2sd.
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  ASSERT(output->IsDoubleRegister());
  __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}


// uint32 -> double conversion (LoadUint32 handles values >= 2^31).
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}


// Tags an int32 as a smi in place; on overflow the deferred code
// allocates a heap number instead.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class
DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(
          instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


// Tags a uint32 as a smi in place; values above Smi::kMaxValue go to
// the deferred heap-number allocation path.
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(
          instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


// Shared deferred path for NumberTagI/NumberTagU: recovers the original
// integer, converts it to a double in xmm_scratch, allocates a heap
// number (inline when FLAG_inline_new, otherwise via the runtime under
// a register-saving safepoint), and stores the double into it.
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp);
  XMMRegister xmm_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    // Untagging halves the value; flipping bit 31 then restores the
    // original int32.
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    __ Cvtsi2sd(xmm_scratch, Operand(reg));
  } else {
    __ LoadUint32(xmm_scratch, reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(reg, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, eax);
  }

  // Done. Put the value in xmm_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
}


// Boxes a double value into a newly allocated heap number; falls back
// to the deferred runtime allocation when inline allocation fails or
// is disabled.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


// Deferred slow path for DoNumberTagD: allocates the heap number via
// the runtime with all registers preserved.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kHiddenAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
4586 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 4587 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); 4588 RecordSafepointWithRegisters( 4589 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4590 __ StoreToSafepointRegisterSlot(reg, eax); 4591 } 4592 4593 4594 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4595 HChange* hchange = instr->hydrogen(); 4596 Register input = ToRegister(instr->value()); 4597 if (hchange->CheckFlag(HValue::kCanOverflow) && 4598 hchange->value()->CheckFlag(HValue::kUint32)) { 4599 __ test(input, Immediate(0xc0000000)); 4600 DeoptimizeIf(not_zero, instr->environment()); 4601 } 4602 __ SmiTag(input); 4603 if (hchange->CheckFlag(HValue::kCanOverflow) && 4604 !hchange->value()->CheckFlag(HValue::kUint32)) { 4605 DeoptimizeIf(overflow, instr->environment()); 4606 } 4607 } 4608 4609 4610 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4611 LOperand* input = instr->value(); 4612 Register result = ToRegister(input); 4613 ASSERT(input->IsRegister() && input->Equals(instr->result())); 4614 if (instr->needs_check()) { 4615 __ test(result, Immediate(kSmiTagMask)); 4616 DeoptimizeIf(not_zero, instr->environment()); 4617 } else { 4618 __ AssertSmi(result); 4619 } 4620 __ SmiUntag(result); 4621 } 4622 4623 4624 void LCodeGen::EmitNumberUntagD(Register input_reg, 4625 Register temp_reg, 4626 XMMRegister result_reg, 4627 bool can_convert_undefined_to_nan, 4628 bool deoptimize_on_minus_zero, 4629 LEnvironment* env, 4630 NumberUntagDMode mode) { 4631 Label convert, load_smi, done; 4632 4633 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4634 // Smi check. 4635 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); 4636 4637 // Heap number map check. 4638 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 4639 factory()->heap_number_map()); 4640 if (can_convert_undefined_to_nan) { 4641 __ j(not_equal, &convert, Label::kNear); 4642 } else { 4643 DeoptimizeIf(not_equal, env); 4644 } 4645 4646 // Heap number to XMM conversion. 
4647 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); 4648 4649 if (deoptimize_on_minus_zero) { 4650 XMMRegister xmm_scratch = double_scratch0(); 4651 __ xorps(xmm_scratch, xmm_scratch); 4652 __ ucomisd(result_reg, xmm_scratch); 4653 __ j(not_zero, &done, Label::kNear); 4654 __ movmskpd(temp_reg, result_reg); 4655 __ test_b(temp_reg, 1); 4656 DeoptimizeIf(not_zero, env); 4657 } 4658 __ jmp(&done, Label::kNear); 4659 4660 if (can_convert_undefined_to_nan) { 4661 __ bind(&convert); 4662 4663 // Convert undefined (and hole) to NaN. 4664 __ cmp(input_reg, factory()->undefined_value()); 4665 DeoptimizeIf(not_equal, env); 4666 4667 ExternalReference nan = 4668 ExternalReference::address_of_canonical_non_hole_nan(); 4669 __ movsd(result_reg, Operand::StaticVariable(nan)); 4670 __ jmp(&done, Label::kNear); 4671 } 4672 } else { 4673 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); 4674 } 4675 4676 __ bind(&load_smi); 4677 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the 4678 // input register since we avoid dependencies. 4679 __ mov(temp_reg, input_reg); 4680 __ SmiUntag(temp_reg); // Untag smi before converting to float. 4681 __ Cvtsi2sd(result_reg, Operand(temp_reg)); 4682 __ bind(&done); 4683 } 4684 4685 4686 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { 4687 Register input_reg = ToRegister(instr->value()); 4688 4689 // The input was optimistically untagged; revert it. 4690 STATIC_ASSERT(kSmiTagSize == 1); 4691 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); 4692 4693 if (instr->truncating()) { 4694 Label no_heap_number, check_bools, check_false; 4695 4696 // Heap number map check. 4697 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 4698 factory()->heap_number_map()); 4699 __ j(not_equal, &no_heap_number, Label::kNear); 4700 __ TruncateHeapNumberToI(input_reg, input_reg); 4701 __ jmp(done); 4702 4703 __ bind(&no_heap_number); 4704 // Check for Oddballs. 
Undefined/False is converted to zero and True to one 4705 // for truncating conversions. 4706 __ cmp(input_reg, factory()->undefined_value()); 4707 __ j(not_equal, &check_bools, Label::kNear); 4708 __ Move(input_reg, Immediate(0)); 4709 __ jmp(done); 4710 4711 __ bind(&check_bools); 4712 __ cmp(input_reg, factory()->true_value()); 4713 __ j(not_equal, &check_false, Label::kNear); 4714 __ Move(input_reg, Immediate(1)); 4715 __ jmp(done); 4716 4717 __ bind(&check_false); 4718 __ cmp(input_reg, factory()->false_value()); 4719 __ RecordComment("Deferred TaggedToI: cannot truncate"); 4720 DeoptimizeIf(not_equal, instr->environment()); 4721 __ Move(input_reg, Immediate(0)); 4722 } else { 4723 Label bailout; 4724 XMMRegister scratch = (instr->temp() != NULL) 4725 ? ToDoubleRegister(instr->temp()) 4726 : no_xmm_reg; 4727 __ TaggedToI(input_reg, input_reg, scratch, 4728 instr->hydrogen()->GetMinusZeroMode(), &bailout); 4729 __ jmp(done); 4730 __ bind(&bailout); 4731 DeoptimizeIf(no_condition, instr->environment()); 4732 } 4733 } 4734 4735 4736 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4737 class DeferredTaggedToI V8_FINAL : public LDeferredCode { 4738 public: 4739 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4740 : LDeferredCode(codegen), instr_(instr) { } 4741 virtual void Generate() V8_OVERRIDE { 4742 codegen()->DoDeferredTaggedToI(instr_, done()); 4743 } 4744 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4745 private: 4746 LTaggedToI* instr_; 4747 }; 4748 4749 LOperand* input = instr->value(); 4750 ASSERT(input->IsRegister()); 4751 Register input_reg = ToRegister(input); 4752 ASSERT(input_reg.is(ToRegister(instr->result()))); 4753 4754 if (instr->hydrogen()->value()->representation().IsSmi()) { 4755 __ SmiUntag(input_reg); 4756 } else { 4757 DeferredTaggedToI* deferred = 4758 new(zone()) DeferredTaggedToI(this, instr); 4759 // Optimistically untag the input. 4760 // If the input is a HeapObject, SmiUntag will set the carry flag. 
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
    __ SmiUntag(input_reg);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ j(carry, deferred->entry());
    __ bind(deferred->exit());
  }
}


// Converts a tagged number into an untagged double in an XMM register.
// The actual conversion (smi vs. heap number dispatch) is emitted by
// EmitNumberUntagD.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* temp = instr->temp();
  ASSERT(temp->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  bool deoptimize_on_minus_zero =
      instr->hydrogen()->deoptimize_on_minus_zero();
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  // If the hydrogen value is known to be a smi, skip the heap-number path.
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  XMMRegister result_reg = ToDoubleRegister(result);
  EmitNumberUntagD(input_reg,
                   temp_reg,
                   result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   deoptimize_on_minus_zero,
                   instr->environment(),
                   mode);
}


// Converts an untagged double to an int32.  Truncating conversions use the
// dedicated truncation helper; otherwise DoubleToI bails out (and we
// deoptimize) when the double has no exact int32 representation.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    XMMRegister input_reg = ToDoubleRegister(input);
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister input_reg = ToDoubleRegister(input);
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &bailout,
                 Label::kNear);
    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


// Converts an untagged double to a smi.  Deoptimizes if the value is not an
// exact int32 or if tagging the int32 overflows the smi range.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister input_reg = ToDoubleRegister(input);
  XMMRegister xmm_scratch = double_scratch0();
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &bailout,
               Label::kNear);
  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  // SmiTag shifts left by one; overflow means the int32 does not fit in a smi.
  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


// Deoptimizes unless the value is a smi.
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


// Deoptimizes if the value is a smi, unless the type feedback already
// guarantees a heap object.
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr->environment());
  }
}


// Deoptimizes unless the object's instance type lies in the required
// interval, or matches the required mask/tag pattern.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: one test_b suffices; the expected tag is either the
      // bit itself or zero.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


// Deoptimizes unless the value is the expected heap object.  Objects in new
// space may move, so the comparison goes through a Cell that the GC keeps
// up to date.
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr->environment());
}


// Deferred path of DoCheckMaps: attempts to migrate the instance via the
// runtime.  Deoptimizes when the runtime signals failure (a smi result —
// presumably a failure sentinel; confirm against kTryMigrateInstance).
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    // Clear esi: the runtime call is made with no context.
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


// Checks the object's map against the allowed set.  Pure stability checks
// only register compilation dependencies and emit no code.  When a migration
// target exists, a failed check first tries instance migration (deferred
// code) before deoptimizing.
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      // After migration the map check is re-run from the top.
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  // Compare against all but the last map; any match succeeds early.
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  // The last map decides: mismatch either migrates or deoptimizes.
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


// Clamps an untagged double to the uint8 range [0, 255].
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


// Clamps an untagged int32 to the uint8 range [0, 255], in place.
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr)
{
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


// Clamps a tagged value (smi, heap number, or undefined) to the uint8 range,
// in place.  Undefined clamps to zero; any other non-number deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);
  __ bind(&done);
}


// Extracts the high or low 32 bits of a double into a general register.
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope2(masm(), SSE4_1);
      // SSE4.1: extract dword 1 (the high half) directly.
      __ pextrd(result_reg, value_reg, 1);
    } else {
      // Pre-SSE4.1: shuffle the high dword into position, then move it out.
      XMMRegister xmm_scratch = double_scratch0();
      __ pshufd(xmm_scratch, value_reg, 1);
      __ movd(result_reg, xmm_scratch);
    }
  } else {
    __ movd(result_reg, value_reg);
  }
}


// Builds a double from two 32-bit halves (hi = bits 63..32, lo = bits 31..0).
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope2(masm(), SSE4_1);
    __ movd(result_reg, lo_reg);
    __ pinsrd(result_reg, hi_reg, 1);
  } else {
    // Pre-SSE4.1: place hi in the upper dword via a 32-bit shift, then OR in
    // the low dword.
    XMMRegister xmm_scratch = double_scratch0();
    __ movd(result_reg, hi_reg);
    __ psllq(result_reg, 32);
    __ movd(xmm_scratch, lo_reg);
    __ orps(result_reg, xmm_scratch);
  }
}


// Inline allocation.  Falls back to a runtime call (DoDeferredAllocate) when
// the inline bump-pointer allocation fails or the size is too large.
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  // Pretenuring: pointer space and data space are mutually exclusive, and
  // both exclude new space.
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      // Too large for inline allocation: always go to the runtime.
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    // Fill the new object with one-pointer filler maps, iterating from the
    // last word down to the first (temp counts words, not bytes).
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
           isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}


// Slow path of DoAllocate: allocates in the target space via the runtime.
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    // The runtime expects the size as a smi.
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort
      __ int3();
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


// Converts the object (expected in eax) to fast-properties mode via the
// runtime.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


// Materializes a regexp literal: fetches the cached literal (creating it via
// the runtime on first use), then clones it into a fresh JSRegExp object.
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  // esi = context.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  // undefined marks a not-yet-materialized literal slot.
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  // Inline allocation failed; preserve the literal (ebx) across the call.
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  // Copy the trailing word when the size is an odd number of pointers.
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


// Materializes a closure for a function literal, using the fast-path stub
// where possible and the runtime otherwise.
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(instr->hydrogen()->shared_info()));
    __ push(Immediate(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


// Full typeof: delegates to the runtime.
void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


// Branch on "typeof x === 'literal'".  EmitTypeofIs emits the comparison and
// returns the condition to branch on (no_condition means the type literal
// never matches and the jump was emitted unconditionally).
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


// Emits the comparison for "typeof input === type_name" and returns the
// condition under which it holds.  May clobber |input|.
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  // Use short jumps when the target block immediately follows this one.
  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (String::Equals(type_name, factory()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    // Undetectable strings report typeof "undefined", not "string".
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory()->null_string())) {
    __ cmp(input, factory()->null_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    // Only JSFunction and JSFunctionProxy are callable spec object types.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      // Classic semantics: typeof null === "object".
      __ cmp(input, factory()->null_value());
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    // Unknown type literal: never matches.
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


// Branches on whether the current (outer) frame is a construct frame.
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


// Sets the flags for an equal/not_equal branch on whether the calling frame
// is a CONSTRUCT frame.  Clobbers |temp|.
void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


// Pads with nops so the previous lazy-bailout site has room to be patched.
// Updates last_lazy_deopt_pc_ to the current offset.
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


// Registers the environment for lazy deoptimization at this point; emits no
// code of its own.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


// Unconditional deoptimization.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


// Deferred path of DoStackCheck: calls the stack-guard runtime entry with
// all registers saved, recording a safepoint that supports lazy deopt.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


// Stack overflow check, emitted either at function entry (via the StackCheck
// builtin) or on backward branches (via deferred runtime code).
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


// Prepares the for-in enumerable (expected in eax): deoptimizes for
// undefined, null, smis, and proxies; then either uses the enum cache (map
// ends up in eax) or falls back to the runtime.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr->environment());

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr->environment());

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  // Proxies occupy the lowest spec-object type range; reject them.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime must have returned a fixed array of names (map == meta_map);
  // anything else means the fast path does not apply.
  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


// Loads the enum cache for the given map, or the empty fixed array when the
// enum length is zero.  Deoptimizes if the resulting cache is empty
// (result == 0 after the final test).
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr->environment());
}


// Deoptimizes unless the object's map is the expected one.
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


// Deferred path of DoLoadFieldByIndex: loads a mutable double field via the
// runtime, storing the result back into |object|'s safepoint slot.
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
  // Clear esi: the runtime call is made with no context.
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


// Loads an object field by encoded smi index.  Bit 0 of the index flags a
// mutable-double load (deferred path); the sign of the remaining bits selects
// in-object versus out-of-object (properties backing store) fields.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index);

  Label out_of_object, done;
  // Smi-encoded flag bit: odd index => mutable double, take the slow path.
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  // Drop the flag bit; index is still a smi afterwards.
  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to out of object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


// Stores the given context into the current frame's context slot.
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
}


// Allocates a block context via the runtime.
void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32