// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) {}
  virtual ~SafepointGenerator() { }

  virtual void Generate() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(StackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = StackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewContext, 1);
    }
    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
    // Context is returned in both eax and esi. It replaces the context
    // passed to us. It's saved in the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Slot* slot = scope()->parameter(i)->AsSlot();
      if (slot != NULL && slot->type() == Slot::CONTEXT) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(slot->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers all involved
        // registers, so we have to use a third register to avoid
        // clobbering esi.
        __ mov(ecx, esi);
        __ RecordWrite(ecx, context_offset, eax, ebx);
      }
    }
    Comment(";;; End allocate local context");
  }
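
  // At this point the frame looks like this (a sketch for orientation;
  // the offsets follow from the pushes above and from ToOperand() below):
  //   ebp + 4: return address
  //   ebp + 0: caller's frame pointer
  //   ebp - 4: context (esi)
  //   ebp - 8: JS function (edi)
  //   ebp - 12 and below: spill slots, slot i at ebp - (i + 3) * kPointerSize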

  // Trace the call.
  if (FLAG_trace) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), StackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}
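

// A worked example of the index-to-offset mapping used by ToOperand() and
// HighOperand() below (kPointerSize is 4 on ia32): spill slot 0 lives at
// ebp - 12, just below the fixed frame part, and slot 1 at ebp - 16, while
// incoming parameter index -1 maps to ebp + 8, just above the return
// address at ebp + 4.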
Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  int offset = (index >= 0) ? index + 3 : index - 1;
  return Operand(ebp, -offset * kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  translation->BeginFrame(environment->ast_id(), closure_id, height);
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}
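

// Note on WriteTranslation() above: it recurses on the outer environment
// before emitting its own frame, so for a chain of inlined calls the
// outermost frame's commands come first. As a sketch, for f inlined into g
// the translation reads [g's frame] [f's frame], each introduced by a
// BeginFrame command.
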
void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = StackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               ContextMode context_mode,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (context_mode == RESTORE_CONTEXT) {
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }
  __ call(code, mode);

  RegisterLazyDeoptimization(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        ContextMode context_mode) {
  CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           ContextMode context_mode) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (context_mode == RESTORE_CONTEXT) {
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }
  __ CallRuntime(fun, argc);

  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
}


void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
                                          SafepointMode safepoint_mode) {
  // Create the environment to bail out to. If the call has side effects,
  // execution has to continue after the call; otherwise execution can
  // continue from a previous bailout point, repeating the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(),
                    deoptimization_environment->deoptimization_index());
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(),
        0,
        deoptimization_environment->deoptimization_index());
  }
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      NearLabel done;
      __ j(NegateCondition(cc), &done);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
    }
  }
}
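

// A sketch of the data PopulateDeoptimizationData() below records per
// registered environment i: AstId(i), TranslationIndex(i) (an offset into
// the shared translation byte array), and ArgumentsStackHeight(i); the
// literal array holds the handles the translations refer to by index.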
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    int deoptimization_index) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}


void LCodeGen::RecordSafepoint(int deoptimization_index) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deoptimization_index);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
                  deoptimization_index);
}


void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  LCodeGen::DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}
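

// DoModI below special-cases power-of-two divisors with a mask instead of
// idiv. A worked example of the negative-dividend path: for -7 % 4 the code
// negates the dividend (7), masks with divisor - 1 (7 & 3 == 3), and
// negates again, yielding -3, matching the sign-of-dividend semantics of %
// in JavaScript.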
void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    if (divisor < 0) divisor = -divisor;

    NearLabel positive_dividend, done;
    __ test(dividend, Operand(dividend));
    __ j(not_sign, &positive_dividend);
    __ neg(dividend);
    __ and_(dividend, divisor - 1);
    __ neg(dividend);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ j(not_zero, &done);
      DeoptimizeIf(no_condition, instr->environment());
    } else {
      __ jmp(&done);
    }
    __ bind(&positive_dividend);
    __ and_(dividend, divisor - 1);
    __ bind(&done);
  } else {
    LOperand* right = instr->InputAt(1);
    ASSERT(ToRegister(instr->InputAt(0)).is(eax));
    ASSERT(ToRegister(instr->result()).is(edx));

    Register right_reg = ToRegister(right);
    ASSERT(!right_reg.is(eax));
    ASSERT(!right_reg.is(edx));

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ test(right_reg, ToOperand(right));
      DeoptimizeIf(zero, instr->environment());
    }

    // Sign extend to edx.
    __ cdq();

    // Check for (0 % -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      NearLabel positive_left;
      NearLabel done;
      __ test(eax, Operand(eax));
      __ j(not_sign, &positive_left);
      __ idiv(right_reg);

      // Test the remainder for 0, because then the result would be -0.
      __ test(edx, Operand(edx));
      __ j(not_zero, &done);

      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idiv(right_reg);
      __ bind(&done);
    } else {
      __ idiv(right_reg);
    }
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    NearLabel left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}
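

// DoMulI below strength-reduces multiplications by small constants, for
// example x * 3 via lea(x, [x + 2 * x]), x * 4 via shl(x, 2), and x * 9 via
// lea(x, [x + 8 * x]). These forms are only used when the multiplication
// cannot overflow, since lea and shl do not set the overflow flag.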
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right = instr->InputAt(1);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->TempAt(0)), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    NearLabel done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
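

// Note on the SHR cases in DoShiftI below: a logical shift right by zero
// leaves the sign bit intact, so a result with bit 31 set would be a uint32
// value outside the int32 range assumed for the result; those paths
// therefore test 0x80000000 and deoptimize instead of producing a negative
// number.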
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToImmediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
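

// DoConstantD below materializes a double from its 64-bit pattern in two
// 32-bit halves. For example, 1.0 has the bit pattern 0x3FF0000000000000:
// upper half 0x3FF00000, lower half 0. With SSE4.1 the halves are combined
// via movd and pinsrd; otherwise the upper half is shifted into place with
// psllq and a non-zero lower half is OR-ed in through xmm0.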
void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing so
  // if the constant is -0.0.
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorpd(res, res);
  } else {
    Register temp = ToRegister(instr->TempAt(0));
    uint64_t int_val = BitCast<uint64_t, double>(v);
    int32_t lower = static_cast<int32_t>(int_val);
    int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope scope(SSE4_1);
      if (lower != 0) {
        __ Set(temp, Immediate(lower));
        __ movd(res, Operand(temp));
        __ Set(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      } else {
        __ xorpd(res, res);
        __ Set(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      }
    } else {
      __ Set(temp, Immediate(upper));
      __ movd(res, Operand(temp));
      __ psllq(res, 32);
      if (lower != 0) {
        __ Set(temp, Immediate(lower));
        __ movd(xmm0, Operand(temp));
        __ por(res, xmm0);
      }
    }
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}


void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, ExternalArray::kLengthOffset));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  ASSERT(input.is(result));
  NearLabel done;
  // If the object is a smi return the object.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, &done);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->InputAt(0)));
  CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToImmediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
  XMMRegister result = ToDoubleRegister(instr->result());
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), left);
      __ movdbl(Operand(esp, 1 * kDoubleSize), right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          4);

      // Return value is in st(0) on ia32.
      // Store it into the (fixed) result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(result, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(edx));
  ASSERT(ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  __ nop();  // Signals no inlined code.
}
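

// EmitBranch below exploits block layout: GetNextEmittedBlock skips labels
// that have been replaced, and if either branch target is the next block to
// be emitted, the corresponding jump is omitted and control falls through,
// so only when neither target is next are both a conditional and an
// unconditional jump needed.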
int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ test(reg, Operand(reg));
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
    __ xorpd(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    if (instr->hydrogen()->type().IsBoolean()) {
      __ cmp(reg, factory()->true_value());
      EmitBranch(true_block, false_block, equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      __ cmp(reg, factory()->undefined_value());
      __ j(equal, false_label);
      __ cmp(reg, factory()->true_value());
      __ j(equal, true_label);
      __ cmp(reg, factory()->false_value());
      __ j(equal, false_label);
      __ test(reg, Operand(reg));
      __ j(equal, false_label);
      __ test(reg, Immediate(kSmiTagMask));
      __ j(zero, true_label);

      // Test for double values. Zero is false.
      NearLabel call_stub;
      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
             factory()->heap_number_map());
      __ j(not_equal, &call_stub);
      __ fldz();
      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
      __ FCmp();
      __ j(zero, false_label);
      __ jmp(true_label);

      // The conversion stub doesn't cause garbage collections so it's
      // safe to not record a safepoint after the call.
      __ bind(&call_stub);
      ToBooleanStub stub;
      __ pushad();
      __ push(reg);
      __ CallStub(&stub);
      __ test(eax, Operand(eax));
      __ popad();
      EmitBranch(true_block, false_block, not_zero);
    }
  }
}
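

// Backward gotos can carry a stack check: when include_stack_check() is set
// in DoGoto below, EmitGoto compares esp against the stack limit and, if
// the limit has been crossed, jumps to deferred code that calls
// Runtime::kStackGuard before continuing at the target block.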
void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    // Perform stack overflow check if this goto needs it before jumping.
    if (deferred_stack_check != NULL) {
      ExternalReference stack_limit =
          ExternalReference::address_of_stack_limit(isolate());
      __ cmp(esp, Operand::StaticVariable(stack_limit));
      __ j(above_equal, chunk_->GetAssemblyLabel(block));
      __ jmp(deferred_stack_check->entry());
      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
    } else {
      __ jmp(chunk_->GetAssemblyLabel(block));
    }
  }
}


void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
  PushSafepointRegistersScope scope(this);
  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}


void LCodeGen::DoGoto(LGoto* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   private:
    LGoto* instr_;
  };

  DeferredStackCheck* deferred = NULL;
  if (instr->include_stack_check()) {
    deferred = new DeferredStackCheck(this, instr);
  }
  EmitGoto(instr->block_id(), deferred);
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
  if (right->IsConstantOperand()) {
    __ cmp(ToOperand(left), ToImmediate(right));
  } else {
    __ cmp(ToRegister(left), ToOperand(right));
  }
}


void LCodeGen::DoCmpID(LCmpID* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();

  NearLabel unordered;
  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the unordered case, which produces a false value.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, &unordered, not_taken);
  } else {
    EmitCmpI(left, right);
  }

  NearLabel done;
  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  __ mov(ToRegister(result), factory()->true_value());
  __ j(cc, &done);

  __ bind(&unordered);
  __ mov(ToRegister(result), factory()->false_value());
  __ bind(&done);
}
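

// The parity_even checks around ucomisd in DoCmpID above and in
// DoCmpIDAndBranch below handle NaN: ucomisd raises the parity flag for an
// unordered comparison, and since any comparison involving NaN must yield
// false, those paths jump straight to the false result rather than
// consulting the other flags.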
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the false block.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
  } else {
    EmitCmpI(left, right);
  }

  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  EmitBranch(true_block, false_block, cc);
}


void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  Register result = ToRegister(instr->result());

  __ cmp(left, Operand(right));
  __ mov(result, factory()->true_value());
  NearLabel done;
  __ j(equal, &done);
  __ mov(result, factory()->false_value());
  __ bind(&done);
}


void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNull(LIsNull* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Materialize false.

  __ cmp(reg, factory()->null_value());
  if (instr->is_strict()) {
    __ mov(result, factory()->true_value());
    NearLabel done;
    __ j(equal, &done);
    __ mov(result, factory()->false_value());
    __ bind(&done);
  } else {
    NearLabel true_value, false_value, done;
    __ j(equal, &true_value);
    __ cmp(reg, factory()->undefined_value());
    __ j(equal, &true_value);
    __ test(reg, Immediate(kSmiTagMask));
    __ j(zero, &false_value);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = result;
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    __ j(not_zero, &true_value);
    __ bind(&false_value);
    __ mov(result, factory()->false_value());
    __ jmp(&done);
    __ bind(&true_value);
    __ mov(result, factory()->true_value());
    __ bind(&done);
  }
}
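

// In the non-strict case of DoIsNullAndBranch below (as in DoIsNull above),
// null also compares equal to undefined and to "undetectable" objects,
// i.e. objects whose map has the kIsUndetectable bit set (host objects such
// as document.all behave this way), hence the extra bit-field check after
// the smi check.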
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Jump to the false block.

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmp(reg, factory()->null_value());
  if (instr->is_strict()) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ cmp(reg, factory()->undefined_value());
    __ j(equal, true_label);
    __ test(reg, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->TempAt(0));
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(temp1));
  ASSERT(!input.is(temp2));
  ASSERT(!temp1.is(temp2));

  __ test(input, Immediate(kSmiTagMask));
  __ j(equal, is_not_object);

  __ cmp(input, isolate()->factory()->null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
  __ test(temp2, Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp2, LAST_JS_OBJECT_TYPE);
  return below_equal;
}


void LCodeGen::DoIsObject(LIsObject* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->TempAt(0));
  Label is_false, is_true, done;

  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
  __ j(true_cond, &is_true);

  __ bind(&is_false);
  __ mov(result, factory()->false_value());
  __ jmp(&done);

  __ bind(&is_true);
  __ mov(result, factory()->true_value());

  __ bind(&done);
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmi(LIsSmi* instr) {
  Operand input = ToOperand(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
  __ mov(result, factory()->true_value());
  NearLabel done;
  __ j(zero, &done);
  __ mov(result, factory()->false_value());
  __ bind(&done);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(true_block, false_block, zero);
}


static InstanceType TestType(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
  NearLabel done, is_false;
  __ j(zero, &is_false);
  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
  __ mov(result, factory()->true_value());
  __ jmp(&done);
  __ bind(&is_false);
  __ mov(result, factory()->false_value());
  __ bind(&done);
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ mov(result, factory()->true_value());
  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  NearLabel done;
  __ j(zero, &done);
  __ mov(result, factory()->false_value());
  __ bind(&done);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}


// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
1760 __ test(input, Immediate(kSmiTagMask)); 1761 __ j(zero, is_false); 1762 __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp); 1763 __ j(below, is_false); 1764 1765 // Map is now in temp. 1766 // Functions have class 'Function'. 1767 __ CmpInstanceType(temp, JS_FUNCTION_TYPE); 1768 if (class_name->IsEqualTo(CStrVector("Function"))) { 1769 __ j(equal, is_true); 1770 } else { 1771 __ j(equal, is_false); 1772 } 1773 1774 // Check if the constructor in the map is a function. 1775 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset)); 1776 1777 // As long as JS_FUNCTION_TYPE is the last instance type and it is 1778 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for 1779 // LAST_JS_OBJECT_TYPE. 1780 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); 1781 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); 1782 1783 // Objects with a non-function constructor have class 'Object'. 1784 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); 1785 if (class_name->IsEqualTo(CStrVector("Object"))) { 1786 __ j(not_equal, is_true); 1787 } else { 1788 __ j(not_equal, is_false); 1789 } 1790 1791 // temp now contains the constructor function. Grab the 1792 // instance class name from there. 1793 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 1794 __ mov(temp, FieldOperand(temp, 1795 SharedFunctionInfo::kInstanceClassNameOffset)); 1796 // The class name we are testing against is a symbol because it's a literal. 1797 // The name in the constructor is a symbol because of the way the context is 1798 // booted. This routine isn't expected to work for random API-created 1799 // classes and it doesn't have to because you can't access it with natives 1800 // syntax. Since both sides are symbols it is sufficient to use an identity 1801 // comparison. 1802 __ cmp(temp, class_name); 1803 // End with the answer in the z flag. 1804 } 1805 1806 1807 void LCodeGen::DoClassOfTest(LClassOfTest* instr) { 1808 Register input = ToRegister(instr->InputAt(0)); 1809 Register result = ToRegister(instr->result()); 1810 ASSERT(input.is(result)); 1811 Register temp = ToRegister(instr->TempAt(0)); 1812 Handle<String> class_name = instr->hydrogen()->class_name(); 1813 NearLabel done; 1814 Label is_true, is_false; 1815 1816 EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input); 1817 1818 __ j(not_equal, &is_false); 1819 1820 __ bind(&is_true); 1821 __ mov(result, factory()->true_value()); 1822 __ jmp(&done); 1823 1824 __ bind(&is_false); 1825 __ mov(result, factory()->false_value()); 1826 __ bind(&done); 1827 } 1828 1829 1830 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 1831 Register input = ToRegister(instr->InputAt(0)); 1832 Register temp = ToRegister(instr->TempAt(0)); 1833 Register temp2 = ToRegister(instr->TempAt(1)); 1834 if (input.is(temp)) { 1835 // Swap. 
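    // EmitClassOfTest requires input and temp to be distinct; only input and
    // temp2 may alias, so route the aliasing register into the temp2 slot.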
1836 Register swapper = temp; 1837 temp = temp2; 1838 temp2 = swapper; 1839 } 1840 Handle<String> class_name = instr->hydrogen()->class_name(); 1841 1842 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1843 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1844 1845 Label* true_label = chunk_->GetAssemblyLabel(true_block); 1846 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1847 1848 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2); 1849 1850 EmitBranch(true_block, false_block, equal); 1851 } 1852 1853 1854 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 1855 Register reg = ToRegister(instr->InputAt(0)); 1856 int true_block = instr->true_block_id(); 1857 int false_block = instr->false_block_id(); 1858 1859 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); 1860 EmitBranch(true_block, false_block, equal); 1861 } 1862 1863 1864 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 1865 // Object and function are in fixed registers defined by the stub. 1866 ASSERT(ToRegister(instr->context()).is(esi)); 1867 InstanceofStub stub(InstanceofStub::kArgsInRegisters); 1868 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 1869 1870 NearLabel true_value, done; 1871 __ test(eax, Operand(eax)); 1872 __ j(zero, &true_value); 1873 __ mov(ToRegister(instr->result()), factory()->false_value()); 1874 __ jmp(&done); 1875 __ bind(&true_value); 1876 __ mov(ToRegister(instr->result()), factory()->true_value()); 1877 __ bind(&done); 1878 } 1879 1880 1881 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { 1882 ASSERT(ToRegister(instr->context()).is(esi)); 1883 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1884 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1885 1886 InstanceofStub stub(InstanceofStub::kArgsInRegisters); 1887 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 1888 __ test(eax, Operand(eax)); 1889 EmitBranch(true_block, false_block, zero); 1890 } 1891 1892 1893 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 1894 class DeferredInstanceOfKnownGlobal: public LDeferredCode { 1895 public: 1896 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 1897 LInstanceOfKnownGlobal* instr) 1898 : LDeferredCode(codegen), instr_(instr) { } 1899 virtual void Generate() { 1900 codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); 1901 } 1902 1903 Label* map_check() { return &map_check_; } 1904 1905 private: 1906 LInstanceOfKnownGlobal* instr_; 1907 Label map_check_; 1908 }; 1909 1910 DeferredInstanceOfKnownGlobal* deferred; 1911 deferred = new DeferredInstanceOfKnownGlobal(this, instr); 1912 1913 Label done, false_result; 1914 Register object = ToRegister(instr->InputAt(0)); 1915 Register temp = ToRegister(instr->TempAt(0)); 1916 1917 // A Smi is not an instance of anything. 1918 __ test(object, Immediate(kSmiTagMask)); 1919 __ j(zero, &false_result, not_taken); 1920 1921 // This is the inlined call site instanceof cache. The two occurences of the 1922 // hole value will be patched to the last map/result pair generated by the 1923 // instanceof stub. 1924 NearLabel cache_miss; 1925 Register map = ToRegister(instr->TempAt(0)); 1926 __ mov(map, FieldOperand(object, HeapObject::kMapOffset)); 1927 __ bind(deferred->map_check()); // Label for calculating code patching. 1928 __ cmp(map, factory()->the_hole_value()); // Patched to cached map. 
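  // The hole value never matches a real map, so until the instanceof stub
  // has patched in a cached map/result pair this comparison falls through
  // to the cache miss path below (see DoDeferredLInstanceOfKnownGlobal).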
1929 __ j(not_equal, &cache_miss, not_taken); 1930 __ mov(eax, factory()->the_hole_value()); // Patched to either true or false. 1931 __ jmp(&done); 1932 1933 // The inlined call site cache did not match. Check for null and string 1934 // before calling the deferred code. 1935 __ bind(&cache_miss); 1936 // Null is not an instance of anything. 1937 __ cmp(object, factory()->null_value()); 1938 __ j(equal, &false_result); 1939 1940 // String values are not instances of anything. 1941 Condition is_string = masm_->IsObjectStringType(object, temp, temp); 1942 __ j(is_string, &false_result); 1943 1944 // Go to the deferred code. 1945 __ jmp(deferred->entry()); 1946 1947 __ bind(&false_result); 1948 __ mov(ToRegister(instr->result()), factory()->false_value()); 1949 1950 // Here result has either true or false. Deferred code also produces true or 1951 // false object. 1952 __ bind(deferred->exit()); 1953 __ bind(&done); 1954 } 1955 1956 1957 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, 1958 Label* map_check) { 1959 PushSafepointRegistersScope scope(this); 1960 1961 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 1962 flags = static_cast<InstanceofStub::Flags>( 1963 flags | InstanceofStub::kArgsInRegisters); 1964 flags = static_cast<InstanceofStub::Flags>( 1965 flags | InstanceofStub::kCallSiteInlineCheck); 1966 flags = static_cast<InstanceofStub::Flags>( 1967 flags | InstanceofStub::kReturnTrueFalseObject); 1968 InstanceofStub stub(flags); 1969 1970 // Get the temp register reserved by the instruction. This needs to be a 1971 // register which is pushed last by PushSafepointRegisters as top of the 1972 // stack is used to pass the offset to the location of the map check to 1973 // the stub. 1974 Register temp = ToRegister(instr->TempAt(0)); 1975 ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0); 1976 __ mov(InstanceofStub::right(), Immediate(instr->function())); 1977 static const int kAdditionalDelta = 16; 1978 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; 1979 __ mov(temp, Immediate(delta)); 1980 __ StoreToSafepointRegisterSlot(temp, temp); 1981 CallCodeGeneric(stub.GetCode(), 1982 RelocInfo::CODE_TARGET, 1983 instr, 1984 RESTORE_CONTEXT, 1985 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 1986 // Put the result value into the eax slot and restore all registers. 
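  // The registers saved by PushSafepointRegistersScope are about to be
  // restored; writing eax into its own safepoint slot lets the stub's
  // result survive that restore.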
1987 __ StoreToSafepointRegisterSlot(eax, eax); 1988 } 1989 1990 1991 static Condition ComputeCompareCondition(Token::Value op) { 1992 switch (op) { 1993 case Token::EQ_STRICT: 1994 case Token::EQ: 1995 return equal; 1996 case Token::LT: 1997 return less; 1998 case Token::GT: 1999 return greater; 2000 case Token::LTE: 2001 return less_equal; 2002 case Token::GTE: 2003 return greater_equal; 2004 default: 2005 UNREACHABLE(); 2006 return no_condition; 2007 } 2008 } 2009 2010 2011 void LCodeGen::DoCmpT(LCmpT* instr) { 2012 Token::Value op = instr->op(); 2013 2014 Handle<Code> ic = CompareIC::GetUninitialized(op); 2015 CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT); 2016 2017 Condition condition = ComputeCompareCondition(op); 2018 if (op == Token::GT || op == Token::LTE) { 2019 condition = ReverseCondition(condition); 2020 } 2021 NearLabel true_value, done; 2022 __ test(eax, Operand(eax)); 2023 __ j(condition, &true_value); 2024 __ mov(ToRegister(instr->result()), factory()->false_value()); 2025 __ jmp(&done); 2026 __ bind(&true_value); 2027 __ mov(ToRegister(instr->result()), factory()->true_value()); 2028 __ bind(&done); 2029 } 2030 2031 2032 void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) { 2033 Token::Value op = instr->op(); 2034 int true_block = chunk_->LookupDestination(instr->true_block_id()); 2035 int false_block = chunk_->LookupDestination(instr->false_block_id()); 2036 2037 Handle<Code> ic = CompareIC::GetUninitialized(op); 2038 CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT); 2039 2040 // The compare stub expects compare condition and the input operands 2041 // reversed for GT and LTE. 2042 Condition condition = ComputeCompareCondition(op); 2043 if (op == Token::GT || op == Token::LTE) { 2044 condition = ReverseCondition(condition); 2045 } 2046 __ test(eax, Operand(eax)); 2047 EmitBranch(true_block, false_block, condition); 2048 } 2049 2050 2051 void LCodeGen::DoReturn(LReturn* instr) { 2052 if (FLAG_trace) { 2053 // Preserve the return value on the stack and rely on the runtime call 2054 // to return the value in the same register. We're leaving the code 2055 // managed by the register allocator and tearing down the frame, it's 2056 // safe to write to the context register. 2057 __ push(eax); 2058 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2059 __ CallRuntime(Runtime::kTraceExit, 1); 2060 } 2061 __ mov(esp, ebp); 2062 __ pop(ebp); 2063 __ Ret((ParameterCount() + 1) * kPointerSize, ecx); 2064 } 2065 2066 2067 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2068 Register result = ToRegister(instr->result()); 2069 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); 2070 if (instr->hydrogen()->check_hole_value()) { 2071 __ cmp(result, factory()->the_hole_value()); 2072 DeoptimizeIf(equal, instr->environment()); 2073 } 2074 } 2075 2076 2077 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2078 ASSERT(ToRegister(instr->context()).is(esi)); 2079 ASSERT(ToRegister(instr->global_object()).is(eax)); 2080 ASSERT(ToRegister(instr->result()).is(eax)); 2081 2082 __ mov(ecx, instr->name()); 2083 RelocInfo::Mode mode = instr->for_typeof() ? 
RelocInfo::CODE_TARGET : 2084 RelocInfo::CODE_TARGET_CONTEXT; 2085 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2086 CallCode(ic, mode, instr, CONTEXT_ADJUSTED); 2087 } 2088 2089 2090 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { 2091 Register value = ToRegister(instr->InputAt(0)); 2092 Operand cell_operand = Operand::Cell(instr->hydrogen()->cell()); 2093 2094 // If the cell we are storing to contains the hole it could have 2095 // been deleted from the property dictionary. In that case, we need 2096 // to update the property details in the property dictionary to mark 2097 // it as no longer deleted. We deoptimize in that case. 2098 if (instr->hydrogen()->check_hole_value()) { 2099 __ cmp(cell_operand, factory()->the_hole_value()); 2100 DeoptimizeIf(equal, instr->environment()); 2101 } 2102 2103 // Store the value. 2104 __ mov(cell_operand, value); 2105 } 2106 2107 2108 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { 2109 ASSERT(ToRegister(instr->context()).is(esi)); 2110 ASSERT(ToRegister(instr->global_object()).is(edx)); 2111 ASSERT(ToRegister(instr->value()).is(eax)); 2112 2113 __ mov(ecx, instr->name()); 2114 Handle<Code> ic = instr->strict_mode() 2115 ? isolate()->builtins()->StoreIC_Initialize_Strict() 2116 : isolate()->builtins()->StoreIC_Initialize(); 2117 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED); 2118 } 2119 2120 2121 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2122 Register context = ToRegister(instr->context()); 2123 Register result = ToRegister(instr->result()); 2124 __ mov(result, ContextOperand(context, instr->slot_index())); 2125 } 2126 2127 2128 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2129 Register context = ToRegister(instr->context()); 2130 Register value = ToRegister(instr->value()); 2131 __ mov(ContextOperand(context, instr->slot_index()), value); 2132 if (instr->needs_write_barrier()) { 2133 Register temp = ToRegister(instr->TempAt(0)); 2134 int offset = Context::SlotOffset(instr->slot_index()); 2135 __ RecordWrite(context, offset, value, temp); 2136 } 2137 } 2138 2139 2140 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2141 Register object = ToRegister(instr->object()); 2142 Register result = ToRegister(instr->result()); 2143 if (instr->hydrogen()->is_in_object()) { 2144 __ mov(result, FieldOperand(object, instr->hydrogen()->offset())); 2145 } else { 2146 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); 2147 __ mov(result, FieldOperand(result, instr->hydrogen()->offset())); 2148 } 2149 } 2150 2151 2152 void LCodeGen::EmitLoadField(Register result, 2153 Register object, 2154 Handle<Map> type, 2155 Handle<String> name) { 2156 LookupResult lookup; 2157 type->LookupInDescriptors(NULL, *name, &lookup); 2158 ASSERT(lookup.IsProperty() && lookup.type() == FIELD); 2159 int index = lookup.GetLocalFieldIndexFromMap(*type); 2160 int offset = index * kPointerSize; 2161 if (index < 0) { 2162 // Negative property indices are in-object properties, indexed 2163 // from the end of the fixed part of the object. 2164 __ mov(result, FieldOperand(object, offset + type->instance_size())); 2165 } else { 2166 // Non-negative property indices are in the properties array. 
2167 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); 2168 __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize)); 2169 } 2170 } 2171 2172 2173 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { 2174 Register object = ToRegister(instr->object()); 2175 Register result = ToRegister(instr->result()); 2176 2177 int map_count = instr->hydrogen()->types()->length(); 2178 Handle<String> name = instr->hydrogen()->name(); 2179 if (map_count == 0) { 2180 ASSERT(instr->hydrogen()->need_generic()); 2181 __ mov(ecx, name); 2182 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2183 CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT); 2184 } else { 2185 NearLabel done; 2186 for (int i = 0; i < map_count - 1; ++i) { 2187 Handle<Map> map = instr->hydrogen()->types()->at(i); 2188 NearLabel next; 2189 __ cmp(FieldOperand(object, HeapObject::kMapOffset), map); 2190 __ j(not_equal, &next); 2191 EmitLoadField(result, object, map, name); 2192 __ jmp(&done); 2193 __ bind(&next); 2194 } 2195 Handle<Map> map = instr->hydrogen()->types()->last(); 2196 __ cmp(FieldOperand(object, HeapObject::kMapOffset), map); 2197 if (instr->hydrogen()->need_generic()) { 2198 NearLabel generic; 2199 __ j(not_equal, &generic); 2200 EmitLoadField(result, object, map, name); 2201 __ jmp(&done); 2202 __ bind(&generic); 2203 __ mov(ecx, name); 2204 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2205 CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT); 2206 } else { 2207 DeoptimizeIf(not_equal, instr->environment()); 2208 EmitLoadField(result, object, map, name); 2209 } 2210 __ bind(&done); 2211 } 2212 } 2213 2214 2215 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 2216 ASSERT(ToRegister(instr->context()).is(esi)); 2217 ASSERT(ToRegister(instr->object()).is(eax)); 2218 ASSERT(ToRegister(instr->result()).is(eax)); 2219 2220 __ mov(ecx, instr->name()); 2221 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); 2222 CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 2223 } 2224 2225 2226 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 2227 Register function = ToRegister(instr->function()); 2228 Register temp = ToRegister(instr->TempAt(0)); 2229 Register result = ToRegister(instr->result()); 2230 2231 // Check that the function really is a function. 2232 __ CmpObjectType(function, JS_FUNCTION_TYPE, result); 2233 DeoptimizeIf(not_equal, instr->environment()); 2234 2235 // Check whether the function has an instance prototype. 2236 NearLabel non_instance; 2237 __ test_b(FieldOperand(result, Map::kBitFieldOffset), 2238 1 << Map::kHasNonInstancePrototype); 2239 __ j(not_zero, &non_instance); 2240 2241 // Get the prototype or initial map from the function. 2242 __ mov(result, 2243 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2244 2245 // Check that the function has a prototype or an initial map. 2246 __ cmp(Operand(result), Immediate(factory()->the_hole_value())); 2247 DeoptimizeIf(equal, instr->environment()); 2248 2249 // If the function does not have an initial map, we're done. 2250 NearLabel done; 2251 __ CmpObjectType(result, MAP_TYPE, temp); 2252 __ j(not_equal, &done); 2253 2254 // Get the prototype from the initial map. 2255 __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); 2256 __ jmp(&done); 2257 2258 // Non-instance prototype: Fetch prototype from constructor field 2259 // in the function's map. 
2260   __ bind(&non_instance);
2261   __ mov(result, FieldOperand(result, Map::kConstructorOffset));
2262
2263   // All done.
2264   __ bind(&done);
2265 }
2266
2267
2268 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2269   Register result = ToRegister(instr->result());
2270   Register input = ToRegister(instr->InputAt(0));
2271   __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
2272   if (FLAG_debug_code) {
2273     NearLabel done;
2274     __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2275            Immediate(factory()->fixed_array_map()));
2276     __ j(equal, &done);
2277     __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2278            Immediate(factory()->fixed_cow_array_map()));
2279     __ j(equal, &done);
2280     Register temp((result.is(eax)) ? ebx : eax);
2281     __ push(temp);
2282     __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
2283     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
2284     __ sub(Operand(temp), Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
2285     __ cmp(Operand(temp), Immediate(kExternalArrayTypeCount));
2286     __ pop(temp);
2287     __ Check(below, "Check for fast elements or pixel array failed.");
2288     __ bind(&done);
2289   }
2290 }
2291
2292
2293 void LCodeGen::DoLoadExternalArrayPointer(
2294     LLoadExternalArrayPointer* instr) {
2295   Register result = ToRegister(instr->result());
2296   Register input = ToRegister(instr->InputAt(0));
2297   __ mov(result, FieldOperand(input,
2298                               ExternalArray::kExternalPointerOffset));
2299 }
2300
2301
2302 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2303   Register arguments = ToRegister(instr->arguments());
2304   Register length = ToRegister(instr->length());
2305   Operand index = ToOperand(instr->index());
2306   Register result = ToRegister(instr->result());
2307
2308   __ sub(length, index);
2309   DeoptimizeIf(below_equal, instr->environment());
2310
2311   // There are two words between the frame pointer and the last argument.
2312   // Subtracting the index from the length accounts for one of them; add one more.
2313   __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2314 }
2315
2316
2317 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2318   Register elements = ToRegister(instr->elements());
2319   Register key = ToRegister(instr->key());
2320   Register result = ToRegister(instr->result());
2321   ASSERT(result.is(elements));
2322
2323   // Load the result.
2324   __ mov(result, FieldOperand(elements,
2325                               key,
2326                               times_pointer_size,
2327                               FixedArray::kHeaderSize));
2328
2329   // Check for the hole value.
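  // A fast elements backing store can contain holes; loading one means the
  // element is absent, so this fast path deoptimizes rather than falling
  // back to a generic lookup here.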
2330 __ cmp(result, factory()->the_hole_value()); 2331 DeoptimizeIf(equal, instr->environment()); 2332 } 2333 2334 2335 void LCodeGen::DoLoadKeyedSpecializedArrayElement( 2336 LLoadKeyedSpecializedArrayElement* instr) { 2337 Register external_pointer = ToRegister(instr->external_pointer()); 2338 Register key = ToRegister(instr->key()); 2339 ExternalArrayType array_type = instr->array_type(); 2340 if (array_type == kExternalFloatArray) { 2341 XMMRegister result(ToDoubleRegister(instr->result())); 2342 __ movss(result, Operand(external_pointer, key, times_4, 0)); 2343 __ cvtss2sd(result, result); 2344 } else { 2345 Register result(ToRegister(instr->result())); 2346 switch (array_type) { 2347 case kExternalByteArray: 2348 __ movsx_b(result, Operand(external_pointer, key, times_1, 0)); 2349 break; 2350 case kExternalUnsignedByteArray: 2351 case kExternalPixelArray: 2352 __ movzx_b(result, Operand(external_pointer, key, times_1, 0)); 2353 break; 2354 case kExternalShortArray: 2355 __ movsx_w(result, Operand(external_pointer, key, times_2, 0)); 2356 break; 2357 case kExternalUnsignedShortArray: 2358 __ movzx_w(result, Operand(external_pointer, key, times_2, 0)); 2359 break; 2360 case kExternalIntArray: 2361 __ mov(result, Operand(external_pointer, key, times_4, 0)); 2362 break; 2363 case kExternalUnsignedIntArray: 2364 __ mov(result, Operand(external_pointer, key, times_4, 0)); 2365 __ test(result, Operand(result)); 2366 // TODO(danno): we could be more clever here, perhaps having a special 2367 // version of the stub that detects if the overflow case actually 2368 // happens, and generate code that returns a double rather than int. 2369 DeoptimizeIf(negative, instr->environment()); 2370 break; 2371 case kExternalFloatArray: 2372 UNREACHABLE(); 2373 break; 2374 } 2375 } 2376 } 2377 2378 2379 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 2380 ASSERT(ToRegister(instr->context()).is(esi)); 2381 ASSERT(ToRegister(instr->object()).is(edx)); 2382 ASSERT(ToRegister(instr->key()).is(eax)); 2383 2384 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 2385 CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 2386 } 2387 2388 2389 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 2390 Register result = ToRegister(instr->result()); 2391 2392 // Check for arguments adapter frame. 2393 NearLabel done, adapted; 2394 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2395 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset)); 2396 __ cmp(Operand(result), 2397 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 2398 __ j(equal, &adapted); 2399 2400 // No arguments adaptor frame. 2401 __ mov(result, Operand(ebp)); 2402 __ jmp(&done); 2403 2404 // Arguments adaptor frame present. 2405 __ bind(&adapted); 2406 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2407 2408 // Result is the frame pointer for the frame if not adapted and for the real 2409 // frame below the adaptor frame if adapted. 2410 __ bind(&done); 2411 } 2412 2413 2414 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 2415 Operand elem = ToOperand(instr->InputAt(0)); 2416 Register result = ToRegister(instr->result()); 2417 2418 NearLabel done; 2419 2420 // If no arguments adaptor frame the number of arguments is fixed. 2421 __ cmp(ebp, elem); 2422 __ mov(result, Immediate(scope()->num_parameters())); 2423 __ j(equal, &done); 2424 2425 // Arguments adaptor frame present. Get argument length from there. 
2426 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2427 __ mov(result, Operand(result, 2428 ArgumentsAdaptorFrameConstants::kLengthOffset)); 2429 __ SmiUntag(result); 2430 2431 // Argument length is in result register. 2432 __ bind(&done); 2433 } 2434 2435 2436 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 2437 Register receiver = ToRegister(instr->receiver()); 2438 Register function = ToRegister(instr->function()); 2439 Register length = ToRegister(instr->length()); 2440 Register elements = ToRegister(instr->elements()); 2441 Register scratch = ToRegister(instr->TempAt(0)); 2442 ASSERT(receiver.is(eax)); // Used for parameter count. 2443 ASSERT(function.is(edi)); // Required by InvokeFunction. 2444 ASSERT(ToRegister(instr->result()).is(eax)); 2445 2446 // If the receiver is null or undefined, we have to pass the global object 2447 // as a receiver. 2448 NearLabel global_object, receiver_ok; 2449 __ cmp(receiver, factory()->null_value()); 2450 __ j(equal, &global_object); 2451 __ cmp(receiver, factory()->undefined_value()); 2452 __ j(equal, &global_object); 2453 2454 // The receiver should be a JS object. 2455 __ test(receiver, Immediate(kSmiTagMask)); 2456 DeoptimizeIf(equal, instr->environment()); 2457 __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch); 2458 DeoptimizeIf(below, instr->environment()); 2459 __ jmp(&receiver_ok); 2460 2461 __ bind(&global_object); 2462 // TODO(kmillikin): We have a hydrogen value for the global object. See 2463 // if it's better to use it than to explicitly fetch it from the context 2464 // here. 2465 __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset)); 2466 __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX)); 2467 __ bind(&receiver_ok); 2468 2469 // Copy the arguments to this function possibly from the 2470 // adaptor frame below it. 2471 const uint32_t kArgumentsLimit = 1 * KB; 2472 __ cmp(length, kArgumentsLimit); 2473 DeoptimizeIf(above, instr->environment()); 2474 2475 __ push(receiver); 2476 __ mov(receiver, length); 2477 2478 // Loop through the arguments pushing them onto the execution 2479 // stack. 2480 NearLabel invoke, loop; 2481 // length is a small non-negative integer, due to the test above. 2482 __ test(length, Operand(length)); 2483 __ j(zero, &invoke); 2484 __ bind(&loop); 2485 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); 2486 __ dec(length); 2487 __ j(not_zero, &loop); 2488 2489 // Invoke the function. 
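  // eax (the receiver register) now holds the actual argument count, which
  // is what the ParameterCount below expects; the safepoint generator
  // records the safepoint for the call emitted inside InvokeFunction.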
2490 __ bind(&invoke); 2491 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); 2492 LPointerMap* pointers = instr->pointer_map(); 2493 LEnvironment* env = instr->deoptimization_environment(); 2494 RecordPosition(pointers->position()); 2495 RegisterEnvironmentForDeoptimization(env); 2496 SafepointGenerator safepoint_generator(this, 2497 pointers, 2498 env->deoptimization_index()); 2499 v8::internal::ParameterCount actual(eax); 2500 __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); 2501 } 2502 2503 2504 void LCodeGen::DoPushArgument(LPushArgument* instr) { 2505 LOperand* argument = instr->InputAt(0); 2506 if (argument->IsConstantOperand()) { 2507 __ push(ToImmediate(argument)); 2508 } else { 2509 __ push(ToOperand(argument)); 2510 } 2511 } 2512 2513 2514 void LCodeGen::DoContext(LContext* instr) { 2515 Register result = ToRegister(instr->result()); 2516 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); 2517 } 2518 2519 2520 void LCodeGen::DoOuterContext(LOuterContext* instr) { 2521 Register context = ToRegister(instr->context()); 2522 Register result = ToRegister(instr->result()); 2523 __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX))); 2524 __ mov(result, FieldOperand(result, JSFunction::kContextOffset)); 2525 } 2526 2527 2528 void LCodeGen::DoGlobalObject(LGlobalObject* instr) { 2529 Register context = ToRegister(instr->context()); 2530 Register result = ToRegister(instr->result()); 2531 __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX))); 2532 } 2533 2534 2535 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { 2536 Register global = ToRegister(instr->global()); 2537 Register result = ToRegister(instr->result()); 2538 __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset)); 2539 } 2540 2541 2542 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 2543 int arity, 2544 LInstruction* instr) { 2545 // Change context if needed. 2546 bool change_context = 2547 (info()->closure()->context() != function->context()) || 2548 scope()->contains_with() || 2549 (scope()->num_heap_slots() > 0); 2550 if (change_context) { 2551 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); 2552 } else { 2553 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2554 } 2555 2556 // Set eax to arguments count if adaption is not needed. Assumes that eax 2557 // is available to write to at this point. 2558 if (!function->NeedsArgumentsAdaption()) { 2559 __ mov(eax, arity); 2560 } 2561 2562 LPointerMap* pointers = instr->pointer_map(); 2563 RecordPosition(pointers->position()); 2564 2565 // Invoke function. 2566 if (*function == *info()->closure()) { 2567 __ CallSelf(); 2568 } else { 2569 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); 2570 } 2571 2572 // Setup deoptimization. 
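  // Record a safepoint for the call above so that this call site can be
  // lazily deoptimized if needed.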
2573 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT); 2574 } 2575 2576 2577 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { 2578 ASSERT(ToRegister(instr->result()).is(eax)); 2579 __ mov(edi, instr->function()); 2580 CallKnownFunction(instr->function(), instr->arity(), instr); 2581 } 2582 2583 2584 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { 2585 Register input_reg = ToRegister(instr->InputAt(0)); 2586 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 2587 factory()->heap_number_map()); 2588 DeoptimizeIf(not_equal, instr->environment()); 2589 2590 Label done; 2591 Register tmp = input_reg.is(eax) ? ecx : eax; 2592 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx; 2593 2594 // Preserve the value of all registers. 2595 PushSafepointRegistersScope scope(this); 2596 2597 Label negative; 2598 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 2599 // Check the sign of the argument. If the argument is positive, just 2600 // return it. We do not need to patch the stack since |input| and 2601 // |result| are the same register and |input| will be restored 2602 // unchanged by popping safepoint registers. 2603 __ test(tmp, Immediate(HeapNumber::kSignMask)); 2604 __ j(not_zero, &negative); 2605 __ jmp(&done); 2606 2607 __ bind(&negative); 2608 2609 Label allocated, slow; 2610 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); 2611 __ jmp(&allocated); 2612 2613 // Slow case: Call the runtime system to do the number allocation. 2614 __ bind(&slow); 2615 2616 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 2617 2618 // Set the pointer to the new heap number in tmp. 2619 if (!tmp.is(eax)) __ mov(tmp, eax); 2620 2621 // Restore input_reg after call to runtime. 2622 __ LoadFromSafepointRegisterSlot(input_reg, input_reg); 2623 2624 __ bind(&allocated); 2625 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); 2626 __ and_(tmp2, ~HeapNumber::kSignMask); 2627 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); 2628 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); 2629 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); 2630 __ StoreToSafepointRegisterSlot(input_reg, tmp); 2631 2632 __ bind(&done); 2633 } 2634 2635 2636 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { 2637 Register input_reg = ToRegister(instr->InputAt(0)); 2638 __ test(input_reg, Operand(input_reg)); 2639 Label is_positive; 2640 __ j(not_sign, &is_positive); 2641 __ neg(input_reg); 2642 __ test(input_reg, Operand(input_reg)); 2643 DeoptimizeIf(negative, instr->environment()); 2644 __ bind(&is_positive); 2645 } 2646 2647 2648 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { 2649 // Class for deferred case. 
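  // The deferred code below handles the tagged (heap number) input out of
  // line; smi and untagged inputs are handled inline further down.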
2650 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { 2651 public: 2652 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 2653 LUnaryMathOperation* instr) 2654 : LDeferredCode(codegen), instr_(instr) { } 2655 virtual void Generate() { 2656 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 2657 } 2658 private: 2659 LUnaryMathOperation* instr_; 2660 }; 2661 2662 ASSERT(instr->InputAt(0)->Equals(instr->result())); 2663 Representation r = instr->hydrogen()->value()->representation(); 2664 2665 if (r.IsDouble()) { 2666 XMMRegister scratch = xmm0; 2667 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 2668 __ pxor(scratch, scratch); 2669 __ subsd(scratch, input_reg); 2670 __ pand(input_reg, scratch); 2671 } else if (r.IsInteger32()) { 2672 EmitIntegerMathAbs(instr); 2673 } else { // Tagged case. 2674 DeferredMathAbsTaggedHeapNumber* deferred = 2675 new DeferredMathAbsTaggedHeapNumber(this, instr); 2676 Register input_reg = ToRegister(instr->InputAt(0)); 2677 // Smi check. 2678 __ test(input_reg, Immediate(kSmiTagMask)); 2679 __ j(not_zero, deferred->entry()); 2680 EmitIntegerMathAbs(instr); 2681 __ bind(deferred->exit()); 2682 } 2683 } 2684 2685 2686 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 2687 XMMRegister xmm_scratch = xmm0; 2688 Register output_reg = ToRegister(instr->result()); 2689 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 2690 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. 2691 __ ucomisd(input_reg, xmm_scratch); 2692 2693 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2694 DeoptimizeIf(below_equal, instr->environment()); 2695 } else { 2696 DeoptimizeIf(below, instr->environment()); 2697 } 2698 2699 // Use truncating instruction (OK because input is positive). 2700 __ cvttsd2si(output_reg, Operand(input_reg)); 2701 2702 // Overflow is signalled with minint. 2703 __ cmp(output_reg, 0x80000000u); 2704 DeoptimizeIf(equal, instr->environment()); 2705 } 2706 2707 2708 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 2709 XMMRegister xmm_scratch = xmm0; 2710 Register output_reg = ToRegister(instr->result()); 2711 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 2712 2713 // xmm_scratch = 0.5 2714 ExternalReference one_half = ExternalReference::address_of_one_half(); 2715 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); 2716 2717 // input = input + 0.5 2718 __ addsd(input_reg, xmm_scratch); 2719 2720 // We need to return -0 for the input range [-0.5, 0[, otherwise 2721 // compute Math.floor(value + 0.5). 2722 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2723 __ ucomisd(input_reg, xmm_scratch); 2724 DeoptimizeIf(below_equal, instr->environment()); 2725 } else { 2726 // If we don't need to bailout on -0, we check only bailout 2727 // on negative inputs. 2728 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. 2729 __ ucomisd(input_reg, xmm_scratch); 2730 DeoptimizeIf(below, instr->environment()); 2731 } 2732 2733 // Compute Math.floor(value + 0.5). 2734 // Use truncating instruction (OK because input is positive). 2735 __ cvttsd2si(output_reg, Operand(input_reg)); 2736 2737 // Overflow is signalled with minint. 
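  // cvttsd2si returns 0x80000000 (kMinInt) when the value cannot be
  // represented as a signed 32-bit integer, so that result is treated as
  // overflow and triggers a deoptimization.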
2738 __ cmp(output_reg, 0x80000000u); 2739 DeoptimizeIf(equal, instr->environment()); 2740 } 2741 2742 2743 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 2744 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 2745 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 2746 __ sqrtsd(input_reg, input_reg); 2747 } 2748 2749 2750 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { 2751 XMMRegister xmm_scratch = xmm0; 2752 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 2753 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 2754 __ xorpd(xmm_scratch, xmm_scratch); 2755 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. 2756 __ sqrtsd(input_reg, input_reg); 2757 } 2758 2759 2760 void LCodeGen::DoPower(LPower* instr) { 2761 LOperand* left = instr->InputAt(0); 2762 LOperand* right = instr->InputAt(1); 2763 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 2764 Representation exponent_type = instr->hydrogen()->right()->representation(); 2765 2766 if (exponent_type.IsDouble()) { 2767 // It is safe to use ebx directly since the instruction is marked 2768 // as a call. 2769 __ PrepareCallCFunction(4, ebx); 2770 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); 2771 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right)); 2772 __ CallCFunction(ExternalReference::power_double_double_function(isolate()), 2773 4); 2774 } else if (exponent_type.IsInteger32()) { 2775 // It is safe to use ebx directly since the instruction is marked 2776 // as a call. 2777 ASSERT(!ToRegister(right).is(ebx)); 2778 __ PrepareCallCFunction(4, ebx); 2779 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); 2780 __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right)); 2781 __ CallCFunction(ExternalReference::power_double_int_function(isolate()), 2782 4); 2783 } else { 2784 ASSERT(exponent_type.IsTagged()); 2785 CpuFeatures::Scope scope(SSE2); 2786 Register right_reg = ToRegister(right); 2787 2788 Label non_smi, call; 2789 __ test(right_reg, Immediate(kSmiTagMask)); 2790 __ j(not_zero, &non_smi); 2791 __ SmiUntag(right_reg); 2792 __ cvtsi2sd(result_reg, Operand(right_reg)); 2793 __ jmp(&call); 2794 2795 __ bind(&non_smi); 2796 // It is safe to use ebx directly since the instruction is marked 2797 // as a call. 2798 ASSERT(!right_reg.is(ebx)); 2799 __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx); 2800 DeoptimizeIf(not_equal, instr->environment()); 2801 __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset)); 2802 2803 __ bind(&call); 2804 __ PrepareCallCFunction(4, ebx); 2805 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left)); 2806 __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg); 2807 __ CallCFunction(ExternalReference::power_double_double_function(isolate()), 2808 4); 2809 } 2810 2811 // Return value is in st(0) on ia32. 2812 // Store it into the (fixed) result register. 
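  // There is no direct x87-to-XMM move, so the value is spilled to the
  // stack and reloaded into the XMM result register.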
2813 __ sub(Operand(esp), Immediate(kDoubleSize)); 2814 __ fstp_d(Operand(esp, 0)); 2815 __ movdbl(result_reg, Operand(esp, 0)); 2816 __ add(Operand(esp), Immediate(kDoubleSize)); 2817 } 2818 2819 2820 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 2821 ASSERT(instr->InputAt(0)->Equals(instr->result())); 2822 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); 2823 NearLabel positive, done, zero, negative; 2824 __ xorpd(xmm0, xmm0); 2825 __ ucomisd(input_reg, xmm0); 2826 __ j(above, &positive); 2827 __ j(equal, &zero); 2828 ExternalReference nan = ExternalReference::address_of_nan(); 2829 __ movdbl(input_reg, Operand::StaticVariable(nan)); 2830 __ jmp(&done); 2831 __ bind(&zero); 2832 __ push(Immediate(0xFFF00000)); 2833 __ push(Immediate(0)); 2834 __ movdbl(input_reg, Operand(esp, 0)); 2835 __ add(Operand(esp), Immediate(kDoubleSize)); 2836 __ jmp(&done); 2837 __ bind(&positive); 2838 __ fldln2(); 2839 __ sub(Operand(esp), Immediate(kDoubleSize)); 2840 __ movdbl(Operand(esp, 0), input_reg); 2841 __ fld_d(Operand(esp, 0)); 2842 __ fyl2x(); 2843 __ fstp_d(Operand(esp, 0)); 2844 __ movdbl(input_reg, Operand(esp, 0)); 2845 __ add(Operand(esp), Immediate(kDoubleSize)); 2846 __ bind(&done); 2847 } 2848 2849 2850 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { 2851 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 2852 TranscendentalCacheStub stub(TranscendentalCache::COS, 2853 TranscendentalCacheStub::UNTAGGED); 2854 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT); 2855 } 2856 2857 2858 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { 2859 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 2860 TranscendentalCacheStub stub(TranscendentalCache::SIN, 2861 TranscendentalCacheStub::UNTAGGED); 2862 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT); 2863 } 2864 2865 2866 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { 2867 switch (instr->op()) { 2868 case kMathAbs: 2869 DoMathAbs(instr); 2870 break; 2871 case kMathFloor: 2872 DoMathFloor(instr); 2873 break; 2874 case kMathRound: 2875 DoMathRound(instr); 2876 break; 2877 case kMathSqrt: 2878 DoMathSqrt(instr); 2879 break; 2880 case kMathPowHalf: 2881 DoMathPowHalf(instr); 2882 break; 2883 case kMathCos: 2884 DoMathCos(instr); 2885 break; 2886 case kMathSin: 2887 DoMathSin(instr); 2888 break; 2889 case kMathLog: 2890 DoMathLog(instr); 2891 break; 2892 2893 default: 2894 UNREACHABLE(); 2895 } 2896 } 2897 2898 2899 void LCodeGen::DoCallKeyed(LCallKeyed* instr) { 2900 ASSERT(ToRegister(instr->context()).is(esi)); 2901 ASSERT(ToRegister(instr->key()).is(ecx)); 2902 ASSERT(ToRegister(instr->result()).is(eax)); 2903 2904 int arity = instr->arity(); 2905 Handle<Code> ic = isolate()->stub_cache()-> 2906 ComputeKeyedCallInitialize(arity, NOT_IN_LOOP); 2907 CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 2908 } 2909 2910 2911 void LCodeGen::DoCallNamed(LCallNamed* instr) { 2912 ASSERT(ToRegister(instr->context()).is(esi)); 2913 ASSERT(ToRegister(instr->result()).is(eax)); 2914 2915 int arity = instr->arity(); 2916 Handle<Code> ic = isolate()->stub_cache()-> 2917 ComputeCallInitialize(arity, NOT_IN_LOOP); 2918 __ mov(ecx, instr->name()); 2919 CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 2920 } 2921 2922 2923 void LCodeGen::DoCallFunction(LCallFunction* instr) { 2924 ASSERT(ToRegister(instr->context()).is(esi)); 2925 ASSERT(ToRegister(instr->result()).is(eax)); 2926 2927 int arity = instr->arity(); 2928 CallFunctionStub stub(arity, 
NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE); 2929 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 2930 __ Drop(1); 2931 } 2932 2933 2934 void LCodeGen::DoCallGlobal(LCallGlobal* instr) { 2935 ASSERT(ToRegister(instr->context()).is(esi)); 2936 ASSERT(ToRegister(instr->result()).is(eax)); 2937 2938 int arity = instr->arity(); 2939 Handle<Code> ic = isolate()->stub_cache()-> 2940 ComputeCallInitialize(arity, NOT_IN_LOOP); 2941 __ mov(ecx, instr->name()); 2942 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED); 2943 } 2944 2945 2946 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { 2947 ASSERT(ToRegister(instr->result()).is(eax)); 2948 __ mov(edi, instr->target()); 2949 CallKnownFunction(instr->target(), instr->arity(), instr); 2950 } 2951 2952 2953 void LCodeGen::DoCallNew(LCallNew* instr) { 2954 ASSERT(ToRegister(instr->context()).is(esi)); 2955 ASSERT(ToRegister(instr->constructor()).is(edi)); 2956 ASSERT(ToRegister(instr->result()).is(eax)); 2957 2958 Handle<Code> builtin = isolate()->builtins()->JSConstructCall(); 2959 __ Set(eax, Immediate(instr->arity())); 2960 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED); 2961 } 2962 2963 2964 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 2965 CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT); 2966 } 2967 2968 2969 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 2970 Register object = ToRegister(instr->object()); 2971 Register value = ToRegister(instr->value()); 2972 int offset = instr->offset(); 2973 2974 if (!instr->transition().is_null()) { 2975 __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition()); 2976 } 2977 2978 // Do the store. 2979 if (instr->is_in_object()) { 2980 __ mov(FieldOperand(object, offset), value); 2981 if (instr->needs_write_barrier()) { 2982 Register temp = ToRegister(instr->TempAt(0)); 2983 // Update the write barrier for the object for in-object properties. 2984 __ RecordWrite(object, offset, value, temp); 2985 } 2986 } else { 2987 Register temp = ToRegister(instr->TempAt(0)); 2988 __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset)); 2989 __ mov(FieldOperand(temp, offset), value); 2990 if (instr->needs_write_barrier()) { 2991 // Update the write barrier for the properties array. 2992 // object is used as a scratch register. 2993 __ RecordWrite(temp, offset, value, object); 2994 } 2995 } 2996 } 2997 2998 2999 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 3000 ASSERT(ToRegister(instr->context()).is(esi)); 3001 ASSERT(ToRegister(instr->object()).is(edx)); 3002 ASSERT(ToRegister(instr->value()).is(eax)); 3003 3004 __ mov(ecx, instr->name()); 3005 Handle<Code> ic = instr->strict_mode() 3006 ? 
isolate()->builtins()->StoreIC_Initialize_Strict() 3007 : isolate()->builtins()->StoreIC_Initialize(); 3008 CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 3009 } 3010 3011 3012 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 3013 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); 3014 DeoptimizeIf(above_equal, instr->environment()); 3015 } 3016 3017 3018 void LCodeGen::DoStoreKeyedSpecializedArrayElement( 3019 LStoreKeyedSpecializedArrayElement* instr) { 3020 Register external_pointer = ToRegister(instr->external_pointer()); 3021 Register key = ToRegister(instr->key()); 3022 ExternalArrayType array_type = instr->array_type(); 3023 if (array_type == kExternalFloatArray) { 3024 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); 3025 __ movss(Operand(external_pointer, key, times_4, 0), xmm0); 3026 } else { 3027 Register value = ToRegister(instr->value()); 3028 switch (array_type) { 3029 case kExternalPixelArray: { 3030 // Clamp the value to [0..255]. 3031 Register temp = ToRegister(instr->TempAt(0)); 3032 // The dec_b below requires that the clamped value is in a byte 3033 // register. eax is an arbitrary choice to satisfy this requirement, we 3034 // hinted the register allocator to give us eax when building the 3035 // instruction. 3036 ASSERT(temp.is(eax)); 3037 __ mov(temp, ToRegister(instr->value())); 3038 NearLabel done; 3039 __ test(temp, Immediate(0xFFFFFF00)); 3040 __ j(zero, &done); 3041 __ setcc(negative, temp); // 1 if negative, 0 if positive. 3042 __ dec_b(temp); // 0 if negative, 255 if positive. 3043 __ bind(&done); 3044 __ mov_b(Operand(external_pointer, key, times_1, 0), temp); 3045 break; 3046 } 3047 case kExternalByteArray: 3048 case kExternalUnsignedByteArray: 3049 __ mov_b(Operand(external_pointer, key, times_1, 0), value); 3050 break; 3051 case kExternalShortArray: 3052 case kExternalUnsignedShortArray: 3053 __ mov_w(Operand(external_pointer, key, times_2, 0), value); 3054 break; 3055 case kExternalIntArray: 3056 case kExternalUnsignedIntArray: 3057 __ mov(Operand(external_pointer, key, times_4, 0), value); 3058 break; 3059 case kExternalFloatArray: 3060 UNREACHABLE(); 3061 break; 3062 } 3063 } 3064 } 3065 3066 3067 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { 3068 Register value = ToRegister(instr->value()); 3069 Register elements = ToRegister(instr->object()); 3070 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; 3071 3072 // Do the store. 3073 if (instr->key()->IsConstantOperand()) { 3074 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 3075 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3076 int offset = 3077 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; 3078 __ mov(FieldOperand(elements, offset), value); 3079 } else { 3080 __ mov(FieldOperand(elements, 3081 key, 3082 times_pointer_size, 3083 FixedArray::kHeaderSize), 3084 value); 3085 } 3086 3087 if (instr->hydrogen()->NeedsWriteBarrier()) { 3088 // Compute address of modified element and store it into key register. 
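    // The key register is dead after the store above, so it is reused to
    // hold the element address that RecordWrite needs.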
3089 __ lea(key, 3090 FieldOperand(elements, 3091 key, 3092 times_pointer_size, 3093 FixedArray::kHeaderSize)); 3094 __ RecordWrite(elements, key, value); 3095 } 3096 } 3097 3098 3099 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 3100 ASSERT(ToRegister(instr->context()).is(esi)); 3101 ASSERT(ToRegister(instr->object()).is(edx)); 3102 ASSERT(ToRegister(instr->key()).is(ecx)); 3103 ASSERT(ToRegister(instr->value()).is(eax)); 3104 3105 Handle<Code> ic = instr->strict_mode() 3106 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() 3107 : isolate()->builtins()->KeyedStoreIC_Initialize(); 3108 CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED); 3109 } 3110 3111 3112 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 3113 class DeferredStringCharCodeAt: public LDeferredCode { 3114 public: 3115 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 3116 : LDeferredCode(codegen), instr_(instr) { } 3117 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } 3118 private: 3119 LStringCharCodeAt* instr_; 3120 }; 3121 3122 Register string = ToRegister(instr->string()); 3123 Register index = no_reg; 3124 int const_index = -1; 3125 if (instr->index()->IsConstantOperand()) { 3126 const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3127 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); 3128 if (!Smi::IsValid(const_index)) { 3129 // Guaranteed to be out of bounds because of the assert above. 3130 // So the bounds check that must dominate this instruction must 3131 // have deoptimized already. 3132 if (FLAG_debug_code) { 3133 __ Abort("StringCharCodeAt: out of bounds index."); 3134 } 3135 // No code needs to be generated. 3136 return; 3137 } 3138 } else { 3139 index = ToRegister(instr->index()); 3140 } 3141 Register result = ToRegister(instr->result()); 3142 3143 DeferredStringCharCodeAt* deferred = 3144 new DeferredStringCharCodeAt(this, instr); 3145 3146 NearLabel flat_string, ascii_string, done; 3147 3148 // Fetch the instance type of the receiver into result register. 3149 __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); 3150 __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); 3151 3152 // We need special handling for non-flat strings. 3153 STATIC_ASSERT(kSeqStringTag == 0); 3154 __ test(result, Immediate(kStringRepresentationMask)); 3155 __ j(zero, &flat_string); 3156 3157 // Handle non-flat strings. 3158 __ test(result, Immediate(kIsConsStringMask)); 3159 __ j(zero, deferred->entry()); 3160 3161 // ConsString. 3162 // Check whether the right hand side is the empty string (i.e. if 3163 // this is really a flat string in a cons string). If that is not 3164 // the case we would rather go to the runtime system now to flatten 3165 // the string. 3166 __ cmp(FieldOperand(string, ConsString::kSecondOffset), 3167 Immediate(factory()->empty_string())); 3168 __ j(not_equal, deferred->entry()); 3169 // Get the first of the two strings and load its instance type. 3170 __ mov(string, FieldOperand(string, ConsString::kFirstOffset)); 3171 __ mov(result, FieldOperand(string, HeapObject::kMapOffset)); 3172 __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset)); 3173 // If the first cons component is also non-flat, then go to runtime. 3174 STATIC_ASSERT(kSeqStringTag == 0); 3175 __ test(result, Immediate(kStringRepresentationMask)); 3176 __ j(not_zero, deferred->entry()); 3177 3178 // Check for ASCII or two-byte string. 
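  // The encoding bit of the instance type distinguishes sequential ASCII
  // strings (one byte per character) from two-byte strings.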
3179 __ bind(&flat_string); 3180 STATIC_ASSERT(kAsciiStringTag != 0); 3181 __ test(result, Immediate(kStringEncodingMask)); 3182 __ j(not_zero, &ascii_string); 3183 3184 // Two-byte string. 3185 // Load the two-byte character code into the result register. 3186 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 3187 if (instr->index()->IsConstantOperand()) { 3188 __ movzx_w(result, 3189 FieldOperand(string, 3190 SeqTwoByteString::kHeaderSize + 3191 (kUC16Size * const_index))); 3192 } else { 3193 __ movzx_w(result, FieldOperand(string, 3194 index, 3195 times_2, 3196 SeqTwoByteString::kHeaderSize)); 3197 } 3198 __ jmp(&done); 3199 3200 // ASCII string. 3201 // Load the byte into the result register. 3202 __ bind(&ascii_string); 3203 if (instr->index()->IsConstantOperand()) { 3204 __ movzx_b(result, FieldOperand(string, 3205 SeqAsciiString::kHeaderSize + const_index)); 3206 } else { 3207 __ movzx_b(result, FieldOperand(string, 3208 index, 3209 times_1, 3210 SeqAsciiString::kHeaderSize)); 3211 } 3212 __ bind(&done); 3213 __ bind(deferred->exit()); 3214 } 3215 3216 3217 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 3218 Register string = ToRegister(instr->string()); 3219 Register result = ToRegister(instr->result()); 3220 3221 // TODO(3095996): Get rid of this. For now, we need to make the 3222 // result register contain a valid pointer because it is already 3223 // contained in the register pointer map. 3224 __ Set(result, Immediate(0)); 3225 3226 PushSafepointRegistersScope scope(this); 3227 __ push(string); 3228 // Push the index as a smi. This is safe because of the checks in 3229 // DoStringCharCodeAt above. 3230 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); 3231 if (instr->index()->IsConstantOperand()) { 3232 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3233 __ push(Immediate(Smi::FromInt(const_index))); 3234 } else { 3235 Register index = ToRegister(instr->index()); 3236 __ SmiTag(index); 3237 __ push(index); 3238 } 3239 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr); 3240 if (FLAG_debug_code) { 3241 __ AbortIfNotSmi(eax); 3242 } 3243 __ SmiUntag(eax); 3244 __ StoreToSafepointRegisterSlot(result, eax); 3245 } 3246 3247 3248 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 3249 class DeferredStringCharFromCode: public LDeferredCode { 3250 public: 3251 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 3252 : LDeferredCode(codegen), instr_(instr) { } 3253 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } 3254 private: 3255 LStringCharFromCode* instr_; 3256 }; 3257 3258 DeferredStringCharFromCode* deferred = 3259 new DeferredStringCharFromCode(this, instr); 3260 3261 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); 3262 Register char_code = ToRegister(instr->char_code()); 3263 Register result = ToRegister(instr->result()); 3264 ASSERT(!char_code.is(result)); 3265 3266 __ cmp(char_code, String::kMaxAsciiCharCode); 3267 __ j(above, deferred->entry()); 3268 __ Set(result, Immediate(factory()->single_character_string_cache())); 3269 __ mov(result, FieldOperand(result, 3270 char_code, times_pointer_size, 3271 FixedArray::kHeaderSize)); 3272 __ cmp(result, factory()->undefined_value()); 3273 __ j(equal, deferred->entry()); 3274 __ bind(deferred->exit()); 3275 } 3276 3277 3278 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 3279 Register char_code = ToRegister(instr->char_code()); 3280 Register result = 
ToRegister(instr->result()); 3281 3282 // TODO(3095996): Get rid of this. For now, we need to make the 3283 // result register contain a valid pointer because it is already 3284 // contained in the register pointer map. 3285 __ Set(result, Immediate(0)); 3286 3287 PushSafepointRegistersScope scope(this); 3288 __ SmiTag(char_code); 3289 __ push(char_code); 3290 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); 3291 __ StoreToSafepointRegisterSlot(result, eax); 3292 } 3293 3294 3295 void LCodeGen::DoStringLength(LStringLength* instr) { 3296 Register string = ToRegister(instr->string()); 3297 Register result = ToRegister(instr->result()); 3298 __ mov(result, FieldOperand(string, String::kLengthOffset)); 3299 } 3300 3301 3302 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 3303 LOperand* input = instr->InputAt(0); 3304 ASSERT(input->IsRegister() || input->IsStackSlot()); 3305 LOperand* output = instr->result(); 3306 ASSERT(output->IsDoubleRegister()); 3307 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); 3308 } 3309 3310 3311 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 3312 class DeferredNumberTagI: public LDeferredCode { 3313 public: 3314 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 3315 : LDeferredCode(codegen), instr_(instr) { } 3316 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } 3317 private: 3318 LNumberTagI* instr_; 3319 }; 3320 3321 LOperand* input = instr->InputAt(0); 3322 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3323 Register reg = ToRegister(input); 3324 3325 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); 3326 __ SmiTag(reg); 3327 __ j(overflow, deferred->entry()); 3328 __ bind(deferred->exit()); 3329 } 3330 3331 3332 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { 3333 Label slow; 3334 Register reg = ToRegister(instr->InputAt(0)); 3335 Register tmp = reg.is(eax) ? ecx : eax; 3336 3337 // Preserve the value of all registers. 3338 PushSafepointRegistersScope scope(this); 3339 3340 // There was overflow, so bits 30 and 31 of the original integer 3341 // disagree. Try to allocate a heap number in new space and store 3342 // the value in there. If that fails, call the runtime system. 3343 NearLabel done; 3344 __ SmiUntag(reg); 3345 __ xor_(reg, 0x80000000); 3346 __ cvtsi2sd(xmm0, Operand(reg)); 3347 if (FLAG_inline_new) { 3348 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 3349 __ jmp(&done); 3350 } 3351 3352 // Slow case: Call the runtime system to do the number allocation. 3353 __ bind(&slow); 3354 3355 // TODO(3095996): Put a valid pointer value in the stack slot where the result 3356 // register is stored, as this register is in the pointer map, but contains an 3357 // integer value. 3358 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); 3359 3360 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 3361 if (!reg.is(eax)) __ mov(reg, eax); 3362 3363 // Done. Put the value in xmm0 into the value of the allocated heap 3364 // number. 
  __ bind(&done);
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
  __ StoreToSafepointRegisterSlot(reg, reg);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->TempAt(0));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(ToRegister(input), Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiUntag(ToRegister(input));
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                LEnvironment* env) {
  NearLabel load_smi, done;

  // Smi check.
  __ test(input_reg, Immediate(kSmiTagMask));
  __ j(zero, &load_smi, not_taken);

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  if (deoptimize_on_undefined) {
    DeoptimizeIf(not_equal, env);
  } else {
    NearLabel heap_number;
    __ j(equal, &heap_number);
    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, env);

    // Convert undefined to NaN.
    ExternalReference nan = ExternalReference::address_of_nan();
    __ movdbl(result_reg, Operand::StaticVariable(nan));
    __ jmp(&done);

    __ bind(&heap_number);
  }
  // Heap number to XMM conversion.
  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ jmp(&done);

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiUntag(input_reg);  // Untag smi before converting to float.
  __ cvtsi2sd(result_reg, Operand(input_reg));
  __ SmiTag(input_reg);  // Retag smi.
  __ bind(&done);
}


class DeferredTaggedToI: public LDeferredCode {
 public:
  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
 private:
  LTaggedToI* instr_;
};


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  NearLabel done, heap_number;
  Register input_reg = ToRegister(instr->InputAt(0));

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());

  if (instr->truncating()) {
    __ j(equal, &heap_number);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, instr->environment());
    __ mov(input_reg, 0);
    __ jmp(&done);

    __ bind(&heap_number);
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);
      NearLabel convert;
      // Use more powerful conversion when sse3 is available.
      // Load x87 register with heap number.
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      // Get exponent alone and check for too-big exponent.
      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ and_(input_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
      __ j(less, &convert);
      // Pop FPU stack before deoptimizing.
      __ ffree(0);
      __ fincstp();
      DeoptimizeIf(no_condition, instr->environment());

      // Reserve space for 64 bit answer.
      __ bind(&convert);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      // Do conversion, which cannot fail because we checked the exponent.
      __ fisttp_d(Operand(esp, 0));
      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
    } else {
      NearLabel deopt;
      XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ cvttsd2si(input_reg, Operand(xmm0));
      __ cmp(input_reg, 0x80000000u);
      __ j(not_equal, &done);
      // Check if the input was 0x80000000 (kMinInt).
      // If not, the conversion overflowed and we deoptimize.
      ExternalReference min_int = ExternalReference::address_of_min_int();
      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
      __ ucomisd(xmm_temp, xmm0);
      DeoptimizeIf(not_equal, instr->environment());
      DeoptimizeIf(parity_even, instr->environment());  // NaN.
    }
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ cvtsi2sd(xmm_temp, Operand(input_reg));
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
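    // Note: a zero result is ambiguous here, since both +0.0 and -0.0
    // truncate to 0. movmskpd copies the sign bits of the packed doubles in
    // xmm0 into the low bits of the destination, so bit 0 holds the sign of
    // the original double; a set bit means the input was -0.0.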
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Smi check.
  __ test(input_reg, Immediate(kSmiTagMask));
  __ j(not_zero, deferred->entry());

  // Smi to int32 conversion
  __ SmiUntag(input_reg);  // Untag smi.

  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cmp(result_reg, 0x80000000u);
    if (CpuFeatures::IsSupported(SSE3)) {
      // This will deoptimize if the exponent of the input is out of range.
      CpuFeatures::Scope scope(SSE3);
      NearLabel convert, done;
      __ j(not_equal, &done);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ movdbl(Operand(esp, 0), input_reg);
      // Get exponent alone and check for too-big exponent.
      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
      __ and_(result_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
      __ j(less, &convert);
      __ add(Operand(esp), Immediate(kDoubleSize));
      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&convert);
      // Do conversion, which cannot fail because we checked the exponent.
      __ fld_d(Operand(esp, 0));
      __ fisttp_d(Operand(esp, 0));
      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
      __ bind(&done);
    } else {
      NearLabel done;
      Register temp_reg = ToRegister(instr->TempAt(0));
      XMMRegister xmm_scratch = xmm0;

      // If cvttsd2si succeeded, we're done. Otherwise, we attempt
      // manual conversion.
      __ j(not_equal, &done);

      // Get high 32 bits of the input in result_reg and temp_reg.
      __ pshufd(xmm_scratch, input_reg, 1);
      __ movd(Operand(temp_reg), xmm_scratch);
      __ mov(result_reg, temp_reg);

      // Prepare negation mask in temp_reg.
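      // Note: temp_reg still holds the high word of the double, so its top
      // bit is the sign bit. The arithmetic shift by 31 below turns that
      // into 0 (positive input) or -1 (negative input); the mask is applied
      // at the end via xor/sub, the usual branch-free two's-complement
      // negation.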
      __ sar(temp_reg, kBitsPerInt - 1);

      // Extract the exponent from result_reg and subtract adjusted
      // bias from it. The adjustment is selected in a way such that
      // when the difference is zero, the answer is in the low 32 bits
      // of the input, otherwise a shift has to be performed.
      __ shr(result_reg, HeapNumber::kExponentShift);
      __ and_(result_reg,
              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
      __ sub(Operand(result_reg),
             Immediate(HeapNumber::kExponentBias +
                       HeapNumber::kExponentBits +
                       HeapNumber::kMantissaBits));
      // Don't handle big (> kMantissaBits + kExponentBits == 63) or
      // special exponents.
      DeoptimizeIf(greater, instr->environment());

      // Zero out the sign and the exponent in the input (by shifting
      // it to the left) and restore the implicit mantissa bit,
      // i.e. convert the input to unsigned int64 shifted left by
      // kExponentBits.
      ExternalReference minus_zero =
          ExternalReference::address_of_minus_zero();
      // Minus zero has the most significant bit set and the other
      // bits cleared.
      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
      __ psllq(input_reg, HeapNumber::kExponentBits);
      __ por(input_reg, xmm_scratch);

      // Get the amount to shift the input right in xmm_scratch.
      __ neg(result_reg);
      __ movd(xmm_scratch, Operand(result_reg));

      // Shift the input right and extract low 32 bits.
      __ psrlq(input_reg, xmm_scratch);
      __ movd(Operand(result_reg), input_reg);

      // Use the prepared mask in temp_reg to negate the result if necessary.
      __ xor_(result_reg, Operand(temp_reg));
      __ sub(result_reg, Operand(temp_reg));
      __ bind(&done);
    }
  } else {
    NearLabel done;
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cvtsi2sd(xmm0, Operand(result_reg));
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // The integer converted back is equal to the original. We
      // only have to test if we got -0 as an input.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToRegister(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToRegister(input), Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  InstanceType first = instr->hydrogen()->first();
  InstanceType last = instr->hydrogen()->last();

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  // If there is only one type in the interval, check for equality.
  if (first == last) {
    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));
    DeoptimizeIf(not_equal, instr->environment());
  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
    // String has a dedicated bit in instance type.
    __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
    DeoptimizeIf(not_zero, instr->environment());
  } else {
    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));
    DeoptimizeIf(below, instr->environment());
    // Omit check for the last type.
    if (last != LAST_TYPE) {
      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
              static_cast<int8_t>(last));
      DeoptimizeIf(above, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->InputAt(0)->IsRegister());
  Register reg = ToRegister(instr->InputAt(0));
  __ cmp(reg, instr->hydrogen()->target());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         instr->hydrogen()->map());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    __ mov(result, Operand::Cell(cell));
  } else {
    __ mov(result, object);
  }
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_elements()));

  // Pick the right runtime function or stub to call.
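  // Roughly: copy-on-write literals are cloned by FastCloneShallowArrayStub
  // in COPY_ON_WRITE_ELEMENTS mode, nested literals (depth > 1) and literals
  // longer than the stub supports go through the runtime, and everything
  // else uses the stub in CLONE_ELEMENTS mode.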
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  }
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_properties()));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ push(Immediate(Smi::FromInt(flags)));

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow,
                4,
                instr,
                CONTEXT_ADJUSTED);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  NearLabel materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized);

  // Create regexp literal using runtime function
  // Result will be in eax.
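  // The four arguments pushed below are the literals array (still in ecx),
  // the literal index, the pattern and the flags, in the order the runtime
  // call expects them.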
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(
        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  } else {
    __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(0);
  if (input->IsConstantOperand()) {
    __ push(ToImmediate(input));
  } else {
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
}


void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  NearLabel done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ j(final_branch_condition, &true_label);
  __ bind(&false_label);
  __ mov(result, factory()->false_value());
  __ jmp(&done);

  __ bind(&true_label);
  __ mov(result, factory()->true_value());

  __ bind(&done);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    // Regular expressions => 'function' (they are callable).
    __ CmpInstanceType(input, JS_REGEXP_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label);
    // Regular expressions => 'function', not 'object'.
    __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
    __ j(above_equal, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
  Register result = ToRegister(instr->result());
  NearLabel true_label;
  NearLabel false_label;
  NearLabel done;

  EmitIsConstructCall(result);
  __ j(equal, &true_label);

  __ mov(result, factory()->false_value());
  __ jmp(&done);

  __ bind(&true_label);
  __ mov(result, factory()->true_value());

  __ bind(&done);
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  NearLabel check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  // Create safepoint generator that will also ensure enough space in the
  // reloc info for patching in deoptimization (since this is invoking a
  // builtin)
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check.
  NearLabel done;
  ExternalReference stack_limit =
      ExternalReference::address_of_stack_limit(isolate());
  __ cmp(esp, Operand::StaticVariable(stack_limit));
  __ j(above_equal, &done);

  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  __ bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32