// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "bootstrapper.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "runtime.h"
#include "stub-cache.h"
#include "codegen.h"

namespace v8 {
namespace internal {


void ToNumberStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax, ebx, ecx };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax, ebx, ecx, edx };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { ebx };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { edx, ecx };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}
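

// Note: each InitializeInterfaceDescriptor above and below follows the same
// pattern: it names the registers in which the stub expects its parameters
// and, where applicable, the runtime function or IC miss entry to call when
// the stub code cannot handle its inputs. A NULL deoptimization_handler_
// means the stub has no such fallback. See GenerateLightweightMiss below for
// how a descriptor is consumed.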
void LoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { edx };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { edx };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { edx, ecx, eax };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax, ebx };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
}


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // Register state:
  //   eax -- number of arguments
  //   edi -- function
  //   ebx -- type info cell with elements kind
  static Register registers[] = { edi, ebx };
  descriptor->register_param_count_ = 2;

  if (constant_stack_parameter_count != 0) {
    // Stack param count needs (constructor pointer, and single argument).
    descriptor->stack_parameter_count_ = &eax;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // Register state:
  //   eax -- number of arguments
  //   edi -- constructor function
  static Register registers[] = { edi };
  descriptor->register_param_count_ = 1;

  if (constant_stack_parameter_count != 0) {
    // Stack param count needs (constructor pointer, and single argument).
    descriptor->stack_parameter_count_ = &eax;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
}
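

// The constant_stack_parameter_count passed to the helpers above encodes how
// many arguments the constructor stub expects on the stack: 0 (none),
// 1 (a single argument), or -1 when the count is only known at runtime (the
// N-arguments case), in which case eax carries the argument count.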
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}


void StoreGlobalStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { edx, ecx, eax };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}


void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { eax, ebx, ecx, edx };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}


#define __ ACCESS_MASM(masm)
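

// Generic "miss" handler for stubs generated from Hydrogen: pushes the
// stub's register parameters (as described by its interface descriptor) and
// calls the descriptor's miss handler in a fresh internal frame.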
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  Isolate* isolate = masm->isolate();
  isolate->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT(descriptor->register_param_count_ == 0 ||
           eax.is(descriptor->register_params_[param_count - 1]));
    // Push arguments.
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor->register_params_[i]);
    }
    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ ret(0);
}
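

// Fast path for closure creation: allocates a JSFunction in new space,
// wires it to the SharedFunctionInfo passed on the stack, and (when
// FLAG_cache_optimized_code is set) probes the shared info's optimized code
// map for code already specialized to the current native context. Falls back
// to Runtime::kNewClosure if allocation fails.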
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
  Counters* counters = masm->isolate()->counters();

  Label gc;
  __ Allocate(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  __ IncrementCounter(counters->fast_new_closure_total(), 1);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);

  // Compute the function map in the current native context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
  __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  Factory* factory = masm->isolate()->factory();
  __ mov(ebx, Immediate(factory->empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(factory->the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  // But first check if there is an optimized version for our context.
  Label check_optimized;
  Label install_unoptimized;
  if (FLAG_cache_optimized_code) {
    __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
    __ test(ebx, ebx);
    __ j(not_zero, &check_optimized, Label::kNear);
  }
  __ bind(&install_unoptimized);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(factory->undefined_value()));
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  __ bind(&check_optimized);

  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);

  // ecx holds native context, ebx points to fixed array of 3-element entries
  // (native context, optimized code, literals).
  // Map must never be empty, so check the first elements.
  Label install_optimized;
  // Speculatively move code object into edx.
  __ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
  __ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
  __ j(equal, &install_optimized);

  // Iterate through the rest of map backwards. edx holds an index as a Smi.
  Label loop;
  Label restore;
  __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ bind(&loop);
  // Do not double check first entry.
  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
  __ j(equal, &restore);
  __ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
  __ j(not_equal, &loop, Label::kNear);
  // Hit: fetch the optimized code.
  __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));

  __ bind(&install_optimized);
  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);

  // TODO(fschneider): Idea: store proper code pointers in the optimized code
  // map and either unmangle them on marking or do nothing as the whole map is
  // discarded on major GC anyway.
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Now link a function into a list of optimized functions.
  __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));

  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
  // No need for write barrier as JSFunction (eax) is in the new space.

  __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
  // Store JSFunction (eax) into edx before issuing write barrier as
  // it clobbers all the registers passed.
  __ mov(edx, eax);
  __ RecordWriteContextSlot(
      ecx,
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
      edx,
      ebx,
      kDontSaveFPRegs);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  __ bind(&restore);
  // Restore SharedFunctionInfo into edx.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ jmp(&install_unoptimized);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(factory->false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
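

// Fast path for allocating a function context: the context is laid out like
// a FixedArray of length slots_ + Context::MIN_CONTEXT_SLOTS, with the fixed
// slots (closure, previous context, extension, global object) filled in and
// the remaining slots initialized to undefined.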
void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
              eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->function_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Set up the fixed slots.
  __ Set(ebx, Immediate(0));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, factory->undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
  }

  // Return and remove the on-stack parameter.
  __ mov(esi, eax);
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + (1 * kPointerSize)]: function
  // [esp + (2 * kPointerSize)]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ Allocate(FixedArray::SizeFor(length), eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function or sentinel from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Get the serialized scope info from the stack.
  __ mov(ebx, Operand(esp, 2 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->block_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // If this block context is nested in the native context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the native context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
  if (FLAG_debug_code) {
    __ cmp(ecx, 0);
    __ Assert(equal, kExpected0AsASmiSentinel);
  }
  __ mov(ecx, GlobalObjectOperand());
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
  __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);

  // Initialize the rest of the slots to the hole value.
  if (slots_ == 1) {
    __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
           factory->the_hole_value());
  } else {
    __ mov(ebx, factory->the_hole_value());
    for (int i = 0; i < slots_; i++) {
      __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
    }
  }

  // Return and remove the on-stack parameters.
  __ mov(esi, eax);
  __ ret(2 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}
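

// Called when the store buffer (the write barrier's buffer of recorded
// pointer stores) is full: saves all general-purpose registers, and
// optionally the XMM registers, around the C call that processes the buffer.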
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ pushad();
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatureScope scope(masm, SSE2);
    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      __ movdbl(Operand(esp, i * kDoubleSize), reg);
    }
  }
  const int argument_count = 1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, ecx);
  __ mov(Operand(esp, 0 * kPointerSize),
         Immediate(ExternalReference::isolate_address(masm->isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatureScope scope(masm, SSE2);
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      __ movdbl(reg, Operand(esp, i * kDoubleSize));
    }
    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
  }
  __ popad();
  __ ret(0);
}


class FloatingPointHelper : public AllStatic {
 public:
  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in register number. Returns operand as floating point number
  // on FPU stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);

  // Similar to LoadFloatOperands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);

  // Takes the operands in edx and eax and loads them as integers in eax
  // and ecx.
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     bool use_sse3,
                                     BinaryOpIC::TypeInfo left_type,
                                     BinaryOpIC::TypeInfo right_type,
                                     Label* operand_conversion_failure);

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
  // either operand is not a number. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that |operand| has an int32 value. If |int32_result| is different
  // from |scratch|, it will contain that int32 value.
  static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
                                      Label* non_int32,
                                      XMMRegister operand,
                                      Register int32_result,
                                      Register scratch,
                                      XMMRegister xmm_scratch);
};
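

// Truncating double-to-int32 conversion, as required by ECMA-262 ToInt32:
// the double is decomposed into sign, exponent, and significand, and the low
// 32 bits of the appropriately shifted significand become the result.
// Illustrative example (not from the original source): the double
// 0x41E0000000200000 (= 2^31 + 1) has unbiased exponent 31; shifting the
// 53-bit significand (hidden bit included) right by 52 - 31 = 21 bits yields
// 2^31 + 1, whose low 32 bits are 0x80000001, i.e. -2147483647 as an int32.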
void DoubleToIStub::Generate(MacroAssembler* masm) {
  Register input_reg = this->source();
  Register final_result_reg = this->destination();
  ASSERT(is_truncating());

  Label check_negative, process_64_bits, done, done_no_stash;

  int double_offset = offset();

  // Account for return address and saved regs if input is esp.
  if (input_reg.is(esp)) double_offset += 3 * kPointerSize;

  MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
  MemOperand exponent_operand(MemOperand(input_reg,
                                         double_offset + kDoubleSize / 2));

  Register scratch1;
  {
    Register scratch_candidates[3] = { ebx, edx, edi };
    for (int i = 0; i < 3; i++) {
      scratch1 = scratch_candidates[i];
      if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
    }
  }
  // Since we must use ecx for shifts below, use some other register (eax)
  // to calculate the result if ecx is the requested return register.
  Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
  // Save ecx if it isn't the return register and therefore volatile, or if it
  // is the return register, then save the temp register we use in its stead
  // for the result.
  Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
  __ push(scratch1);
  __ push(save_reg);

  bool stash_exponent_copy = !input_reg.is(esp);
  __ mov(scratch1, mantissa_operand);
  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(masm, SSE3);
    // Load x87 register with heap number.
    __ fld_d(mantissa_operand);
  }
  __ mov(ecx, exponent_operand);
  if (stash_exponent_copy) __ push(ecx);

  __ and_(ecx, HeapNumber::kExponentMask);
  __ shr(ecx, HeapNumber::kExponentShift);
  __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
  __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits);

  // Result is entirely in lower 32-bits of mantissa.
  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
  if (CpuFeatures::IsSupported(SSE3)) {
    __ fstp(0);
  }
  __ sub(ecx, Immediate(delta));
  __ xor_(result_reg, result_reg);
  __ cmp(ecx, Immediate(31));
  __ j(above, &done);
  __ shl_cl(scratch1);
  __ jmp(&check_negative);

  __ bind(&process_64_bits);
  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(masm, SSE3);
    if (stash_exponent_copy) {
      // Already a copy of the exponent on the stack, overwrite it.
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      __ sub(esp, Immediate(kDoubleSize / 2));
    } else {
      // Reserve space for 64 bit answer.
      __ sub(esp, Immediate(kDoubleSize));  // Nolint.
    }
    // Do conversion, which cannot fail because we checked the exponent.
    __ fisttp_d(Operand(esp, 0));
    __ mov(result_reg, Operand(esp, 0));  // Load low word of answer as result.
    __ add(esp, Immediate(kDoubleSize));
    __ jmp(&done_no_stash);
  } else {
    // Result must be extracted from shifted 32-bit mantissa.
    __ sub(ecx, Immediate(delta));
    __ neg(ecx);
    if (stash_exponent_copy) {
      __ mov(result_reg, MemOperand(esp, 0));
    } else {
      __ mov(result_reg, exponent_operand);
    }
    __ and_(result_reg,
            Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
    __ add(result_reg,
           Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
    __ shrd(result_reg, scratch1);
    __ shr_cl(result_reg);
    __ test(ecx, Immediate(32));
    if (CpuFeatures::IsSupported(CMOV)) {
      CpuFeatureScope use_cmov(masm, CMOV);
      __ cmov(not_equal, scratch1, result_reg);
    } else {
      Label skip_mov;
      __ j(equal, &skip_mov, Label::kNear);
      __ mov(scratch1, result_reg);
      __ bind(&skip_mov);
    }
  }

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ mov(result_reg, scratch1);
  __ neg(result_reg);
  if (stash_exponent_copy) {
    __ cmp(MemOperand(esp, 0), Immediate(0));
  } else {
    __ cmp(exponent_operand, Immediate(0));
  }
  if (CpuFeatures::IsSupported(CMOV)) {
    CpuFeatureScope use_cmov(masm, CMOV);
    __ cmov(greater, result_reg, scratch1);
  } else {
    Label skip_mov;
    __ j(less_equal, &skip_mov, Label::kNear);
    __ mov(result_reg, scratch1);
    __ bind(&skip_mov);
  }

  // Restore registers.
  __ bind(&done);
  if (stash_exponent_copy) {
    __ add(esp, Immediate(kDoubleSize / 2));
  }
  __ bind(&done_no_stash);
  if (!final_result_reg.is(result_reg)) {
    ASSERT(final_result_reg.is(ecx));
    __ mov(final_result_reg, result_reg);
  }
  __ pop(save_reg);
  __ pop(scratch1);
  __ ret(0);
}
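

// The BinaryOpStub code below implements the patchable binary-operation IC:
// each generated stub is specialized to the operand types observed so far,
// and the type-transition helpers tail-call BinaryOp_Patch to record new
// type feedback and install a more general stub when the current
// specialization no longer matches the incoming operands.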


// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
// |conversion_failure| if the heap number did not contain an int32 value.
// Result is in ecx. Trashes ebx, xmm0, and xmm1.
static void ConvertHeapNumberToInt32(MacroAssembler* masm,
                                     Register source,
                                     Label* conversion_failure) {
  __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
  FloatingPointHelper::CheckSSE2OperandIsInt32(
      masm, conversion_failure, xmm0, ecx, ebx, xmm1);
}


void BinaryOpStub::Initialize() {
  platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  __ push(Immediate(Smi::FromInt(MinorKey())));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      3,
      1);
}


// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  __ push(Immediate(Smi::FromInt(MinorKey())));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      3,
      1);
}


static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
  __ pop(ecx);
  __ pop(eax);
  __ pop(edx);
  __ push(ecx);
}


static void BinaryOpStub_GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
    Token::Value op) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division. Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op == Token::DIV || op == Token::MOD) {
    left = eax;
    right = ebx;
    __ mov(ebx, eax);
    __ mov(eax, edx);
  }

  // 2. Prepare the smi check of both operands by oring them together.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result. Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, left);  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, left);
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, left);
      combined = right;
      break;

    default:
      break;
  }
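
  // Note on the check below: on ia32 a smi is a 31-bit integer shifted left
  // by one, so its low (tag) bit is 0, while heap object pointers have the
  // low bit set. Or-ing the two operands therefore yields a value whose tag
  // bit is 0 only if *both* operands are smis. Illustrative example: 5 is
  // encoded as 0b1010, so (smi | pointer) has bit 0 == 1 and fails the check.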

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ JumpIfNotSmi(combined, &not_smis);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, left);  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, left);  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      //  - 0x80000000: high bit would be lost when smi tagging.
      //  - 0x40000000: this number would convert to negative when smi
      //    tagging. These two cases can only happen with shifts
      //    by 0 or 1 when handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, left);  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis);
      break;

    case Token::SUB:
      __ sub(left, right);
      __ j(overflow, &use_fp_on_smis);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, left);  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, right);
      __ j(zero, &use_fp_on_smis);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by idiv
      // instruction.
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
      // Check that the remainder is zero.
      __ test(edx, edx);
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(eax);
      break;

    case Token::MOD:
      // Check for 0 divisor.
      __ test(right, right);
      __ j(zero, &not_smis);

      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(edx, combined, slow);
      // Move remainder to register eax.
      __ mov(eax, edx);
      break;

    default:
      UNREACHABLE();
  }
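
  // Note: ADD/SUB/MUL/DIV received their arguments in registers, so they
  // return with ret(0); the remaining operations were called with both
  // arguments pushed on the stack, so they drop 2 * kPointerSize on return.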

  // 5. Emit return of result in eax. Some operations have registers pushed.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      __ ret(0);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      __ ret(2 * kPointerSize);
      break;
    default:
      UNREACHABLE();
  }

  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
  if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
    __ bind(&use_fp_on_smis);
    switch (op) {
      // Undo the effects of some operations, and some register moves.
      case Token::SHL:
        // The arguments are saved on the stack, and only used from there.
        break;
      case Token::ADD:
        // Revert right = right + left.
        __ sub(right, left);
        break;
      case Token::SUB:
        // Revert left = left - right.
        __ add(left, right);
        break;
      case Token::MUL:
        // Right was clobbered but a copy is in ebx.
        __ mov(right, ebx);
        break;
      case Token::DIV:
        // Left was clobbered but a copy is in edi. Right is in ebx for
        // division. They should be in eax, ebx for jump to not_smi.
        __ mov(eax, edi);
        break;
      default:
        // No other operators jump to use_fp_on_smis.
        break;
    }
    __ jmp(&not_smis);
  } else {
    ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
    switch (op) {
      case Token::SHL:
      case Token::SHR: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Result we want is in left == edx, so we can put the allocated heap
        // number in eax.
        __ AllocateHeapNumber(eax, ecx, ebx, slow);
        // Store the result in the HeapNumber and return.
        // It's OK to overwrite the arguments on the stack because we
        // are about to return.
        if (op == Token::SHR) {
          __ mov(Operand(esp, 1 * kPointerSize), left);
          __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
          __ fild_d(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        } else {
          ASSERT_EQ(Token::SHL, op);
          if (CpuFeatures::IsSupported(SSE2)) {
            CpuFeatureScope use_sse2(masm, SSE2);
            __ cvtsi2sd(xmm0, left);
            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          } else {
            __ mov(Operand(esp, 1 * kPointerSize), left);
            __ fild_s(Operand(esp, 1 * kPointerSize));
            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          }
        }
        __ ret(2 * kPointerSize);
        break;
      }

      case Token::ADD:
      case Token::SUB:
      case Token::MUL:
      case Token::DIV: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Restore arguments to edx, eax.
        switch (op) {
          case Token::ADD:
            // Revert right = right + left.
            __ sub(right, left);
            break;
          case Token::SUB:
            // Revert left = left - right.
            __ add(left, right);
            break;
          case Token::MUL:
            // Right was clobbered but a copy is in ebx.
            __ mov(right, ebx);
            break;
          case Token::DIV:
            // Left was clobbered but a copy is in edi. Right is in ebx for
            // division.
            __ mov(edx, edi);
            __ mov(eax, right);
            break;
          default:
            UNREACHABLE();
            break;
        }
        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatureScope use_sse2(masm, SSE2);
          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
          switch (op) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
        } else {  // SSE2 not available, use FPU.
          FloatingPointHelper::LoadFloatSmis(masm, ebx);
          switch (op) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
        }
        __ mov(eax, ecx);
        __ ret(0);
        break;
      }

      default:
        break;
    }
  }

  // 7. Non-smi operands, fall out to the non-smi code with the operands in
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
  switch (op) {
    case Token::BIT_OR:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Right operand is saved in ecx and eax was destroyed by the smi
      // check.
      __ mov(eax, ecx);
      break;

    case Token::DIV:
    case Token::MOD:
      // Operands are in eax, ebx at this point.
      __ mov(edx, eax);
      __ mov(eax, ebx);
      break;

    default:
      break;
  }
}
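

// Specialized stub for the case where only smi operands have been seen.
// Falls back to a type transition (re-patching the IC) when an operand or
// the result does not fit the smi case, and to the runtime for the cases
// the smi code cannot handle.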
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label right_arg_changed, call_runtime;

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateRegisterArgsPush(masm);
      break;
    default:
      UNREACHABLE();
  }

  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
    // It is guaranteed that the value will fit into a Smi, because if it
    // didn't, we wouldn't be here, see BinaryOp_Patch.
    __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
    __ j(not_equal, &right_arg_changed);
  }

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
  } else {
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  __ bind(&right_arg_changed);
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      GenerateTypeTransition(masm);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    default:
      UNREACHABLE();
  }

  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      BinaryOpStub_GenerateRegisterArgsPop(masm);
      break;
    default:
      UNREACHABLE();
  }

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(edx);
    __ push(eax);
    GenerateCallRuntime(masm);
  }
  __ ret(0);
}


void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(left_type_ == BinaryOpIC::STRING &&
         right_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime, Label::kNear);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime, Label::kNear);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime, Label::kNear);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime, Label::kNear);

  StringAddStub string_add_stub(
      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}
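

// Note: the OverwriteMode used below (NO_OVERWRITE, OVERWRITE_LEFT,
// OVERWRITE_RIGHT) tells the stub whether one of the input heap numbers is
// known to be dead and may be reused for the result, which lets the code
// skip allocating a fresh HeapNumber.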
static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                                      Label* alloc_failure,
                                                      OverwriteMode mode);


// Input:
//   edx: left operand (tagged)
//   eax: right operand (tagged)
// Output:
//   eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      Label not_floats, not_int32, right_arg_changed;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatureScope use_sse2(masm, SSE2);
        // It could be that only SMIs have been seen at either the left
        // or the right operand. For precise type feedback, patch the IC
        // again if this changes.
        // In theory, we would need the same check in the non-SSE2 case,
        // but since we don't support Crankshaft on such hardware we can
        // afford not to care about precise type feedback.
        if (left_type_ == BinaryOpIC::SMI) {
          __ JumpIfNotSmi(edx, &not_int32);
        }
        if (right_type_ == BinaryOpIC::SMI) {
          __ JumpIfNotSmi(eax, &not_int32);
        }
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        FloatingPointHelper::CheckSSE2OperandIsInt32(
            masm, &not_int32, xmm0, ebx, ecx, xmm2);
        FloatingPointHelper::CheckSSE2OperandIsInt32(
            masm, &not_int32, xmm1, edi, ecx, xmm2);
        if (op_ == Token::MOD) {
          if (encoded_right_arg_.has_value) {
            __ cmp(edi, Immediate(fixed_right_arg_value()));
            __ j(not_equal, &right_arg_changed);
          }
          GenerateRegisterArgsPush(masm);
          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
        } else {
          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          // Check result type if it is currently Int32.
          if (result_type_ <= BinaryOpIC::INT32) {
            FloatingPointHelper::CheckSSE2OperandIsInt32(
                masm, &not_int32, xmm0, ecx, ecx, xmm2);
          }
          BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          __ ret(0);
        }
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        if (op_ == Token::MOD) {
          // The operands are now on the FPU stack, but we don't need them.
          __ fstp(0);
          __ fstp(0);
          GenerateRegisterArgsPush(masm);
          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
        } else {
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          Label after_alloc_failure;
          BinaryOpStub_GenerateHeapResultAllocation(
              masm, &after_alloc_failure, mode_);
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          __ ret(0);
          __ bind(&after_alloc_failure);
          __ fstp(0);  // Pop FPU stack before calling runtime.
          __ jmp(&call_runtime);
        }
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      __ bind(&right_arg_changed);
      GenerateTypeTransition(masm);
      break;
    }
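
    // For the bitwise operations the inputs are unconditionally truncated to
    // int32 (matching the JS semantics of these operators), so they are
    // loaded as raw integers rather than as doubles.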
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      GenerateRegisterArgsPush(masm);
      Label not_floats;
      Label not_int32;
      Label non_smi_result;
      bool use_sse3 = platform_specific_bit_;
      FloatingPointHelper::LoadUnknownsAsIntegers(
          masm, use_sse3, left_type_, right_type_, &not_floats);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, ecx); break;
        case Token::BIT_AND: __ and_(eax, ecx); break;
        case Token::BIT_XOR: __ xor_(eax, ecx); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative and fits in a smi.
        __ test(eax, Immediate(0xc0000000));
        __ j(not_zero, &call_runtime);
      } else {
        // Check if result fits in a smi.
        __ cmp(eax, 0xc0000000);
        __ j(negative, &non_smi_result, Label::kNear);
      }
      // Tag smi result and return.
      __ SmiTag(eax);
      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.

      // All ops except SHR return a signed int32 that we load in
      // a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, eax);  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatureScope use_sse2(masm, SSE2);
          __ cvtsi2sd(xmm0, ebx);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If an allocation fails, or SHR hits a hard case, use the runtime system
  // to get the correct result.
  __ bind(&call_runtime);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
      return;  // Handled above.
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      BinaryOpStub_GenerateRegisterArgsPop(masm);
      break;
    default:
      UNREACHABLE();
  }

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(edx);
    __ push(eax);
    GenerateCallRuntime(masm);
  }
  __ ret(0);
}
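

// Specialization for operands that may be undefined: undefined is converted
// in place to 0 for the bitwise operators and to NaN otherwise, after which
// the stub behaves like the number stub.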
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  Factory* factory = masm->isolate()->factory();

  // Convert oddball arguments to numbers.
  Label check, done;
  __ cmp(edx, factory->undefined_value());
  __ j(not_equal, &check, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(edx, edx);
  } else {
    __ mov(edx, Immediate(factory->nan_value()));
  }
  __ jmp(&done, Label::kNear);
  __ bind(&check);
  __ cmp(eax, factory->undefined_value());
  __ j(not_equal, &done, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(eax, eax);
  } else {
    __ mov(eax, Immediate(factory->nan_value()));
  }
  __ bind(&done);

  GenerateNumberStub(masm);
}


void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
  Label call_runtime;

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Label not_floats;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatureScope use_sse2(masm, SSE2);

        // It could be that only SMIs have been seen at either the left
        // or the right operand. For precise type feedback, patch the IC
        // again if this changes.
        // In theory, we would need the same check in the non-SSE2 case,
        // but since we don't support Crankshaft on such hardware we can
        // afford not to care about precise type feedback.
        if (left_type_ == BinaryOpIC::SMI) {
          __ JumpIfNotSmi(edx, &not_floats);
        }
        if (right_type_ == BinaryOpIC::SMI) {
          __ JumpIfNotSmi(eax, &not_floats);
        }
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        if (left_type_ == BinaryOpIC::INT32) {
          FloatingPointHelper::CheckSSE2OperandIsInt32(
              masm, &not_floats, xmm0, ecx, ecx, xmm2);
        }
        if (right_type_ == BinaryOpIC::INT32) {
          FloatingPointHelper::CheckSSE2OperandIsInt32(
              masm, &not_floats, xmm1, ecx, ecx, xmm2);
        }

        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
        BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        __ ret(0);
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        switch (op_) {
          case Token::ADD: __ faddp(1); break;
          case Token::SUB: __ fsubp(1); break;
          case Token::MUL: __ fmulp(1); break;
          case Token::DIV: __ fdivp(1); break;
          default: UNREACHABLE();
        }
        Label after_alloc_failure;
        BinaryOpStub_GenerateHeapResultAllocation(
            masm, &after_alloc_failure, mode_);
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(0);
        __ bind(&after_alloc_failure);
        __ fstp(0);  // Pop FPU stack before calling runtime.
        __ jmp(&call_runtime);
      }

      __ bind(&not_floats);
      GenerateTypeTransition(masm);
      break;
    }

    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
1694 __ test(eax, Immediate(0xc0000000));
1695 __ j(not_zero, &call_runtime);
1696 } else {
1697 // Check if result fits in a smi.
1698 __ cmp(eax, 0xc0000000);
1699 __ j(negative, &non_smi_result, Label::kNear);
1700 }
1701 // Tag smi result and return.
1702 __ SmiTag(eax);
1703 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1704
1705 // All ops except SHR return a signed int32 that we load in
1706 // a HeapNumber.
1707 if (op_ != Token::SHR) {
1708 __ bind(&non_smi_result);
1709 // Allocate a heap number if needed.
1710 __ mov(ebx, eax); // ebx: result
1711 Label skip_allocation;
1712 switch (mode_) {
1713 case OVERWRITE_LEFT:
1714 case OVERWRITE_RIGHT:
1715 // If the operand was an object, we skip the
1716 // allocation of a heap number.
1717 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1718 1 * kPointerSize : 2 * kPointerSize));
1719 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1720 // Fall through!
1721 case NO_OVERWRITE:
1722 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1723 __ bind(&skip_allocation);
1724 break;
1725 default: UNREACHABLE();
1726 }
1727 // Store the result in the HeapNumber and return.
1728 if (CpuFeatures::IsSupported(SSE2)) {
1729 CpuFeatureScope use_sse2(masm, SSE2);
1730 __ cvtsi2sd(xmm0, ebx);
1731 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1732 } else {
1733 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1734 __ fild_s(Operand(esp, 1 * kPointerSize));
1735 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1736 }
1737 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1738 }
1739
1740 __ bind(&not_floats);
1741 GenerateTypeTransitionWithSavedArgs(masm);
1742 break;
1743 }
1744 default: UNREACHABLE(); break;
1745 }
1746
1747 // If an allocation fails, or SHR or MOD hit a hard case,
1748 // use the runtime system to get the correct result.
1749 __ bind(&call_runtime);
1750
1751 switch (op_) {
1752 case Token::ADD:
1753 case Token::SUB:
1754 case Token::MUL:
1755 case Token::DIV:
1756 case Token::MOD:
1757 break;
1758 case Token::BIT_OR:
1759 case Token::BIT_AND:
1760 case Token::BIT_XOR:
1761 case Token::SAR:
1762 case Token::SHL:
1763 case Token::SHR:
1764 BinaryOpStub_GenerateRegisterArgsPop(masm);
1765 break;
1766 default:
1767 UNREACHABLE();
1768 }
1769
1770 {
1771 FrameScope scope(masm, StackFrame::INTERNAL);
1772 __ push(edx);
1773 __ push(eax);
1774 GenerateCallRuntime(masm);
1775 }
1776 __ ret(0);
1777 }
1778
1779
1780 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1781 Label call_runtime;
1782
1783 Counters* counters = masm->isolate()->counters();
1784 __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
1785
1786 switch (op_) {
1787 case Token::ADD:
1788 case Token::SUB:
1789 case Token::MUL:
1790 case Token::DIV:
1791 break;
1792 case Token::MOD:
1793 case Token::BIT_OR:
1794 case Token::BIT_AND:
1795 case Token::BIT_XOR:
1796 case Token::SAR:
1797 case Token::SHL:
1798 case Token::SHR:
1799 GenerateRegisterArgsPush(masm);
1800 break;
1801 default:
1802 UNREACHABLE();
1803 }
1804
1805 BinaryOpStub_GenerateSmiCode(
1806 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
1807
1808 // Floating point case.
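// Unlike the type-specialized stubs above, the generic stub never patches
// the IC again: on any unexpected input it simply falls through to the
// runtime call at the end of this function.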
1809 switch (op_) {
1810 case Token::ADD:
1811 case Token::SUB:
1812 case Token::MUL:
1813 case Token::DIV: {
1814 Label not_floats;
1815 if (CpuFeatures::IsSupported(SSE2)) {
1816 CpuFeatureScope use_sse2(masm, SSE2);
1817 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1818
1819 switch (op_) {
1820 case Token::ADD: __ addsd(xmm0, xmm1); break;
1821 case Token::SUB: __ subsd(xmm0, xmm1); break;
1822 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1823 case Token::DIV: __ divsd(xmm0, xmm1); break;
1824 default: UNREACHABLE();
1825 }
1826 BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
1827 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1828 __ ret(0);
1829 } else { // SSE2 not available, use FPU.
1830 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1831 FloatingPointHelper::LoadFloatOperands(
1832 masm,
1833 ecx,
1834 FloatingPointHelper::ARGS_IN_REGISTERS);
1835 switch (op_) {
1836 case Token::ADD: __ faddp(1); break;
1837 case Token::SUB: __ fsubp(1); break;
1838 case Token::MUL: __ fmulp(1); break;
1839 case Token::DIV: __ fdivp(1); break;
1840 default: UNREACHABLE();
1841 }
1842 Label after_alloc_failure;
1843 BinaryOpStub_GenerateHeapResultAllocation(
1844 masm, &after_alloc_failure, mode_);
1845 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1846 __ ret(0);
1847 __ bind(&after_alloc_failure);
1848 __ fstp(0); // Pop FPU stack before calling runtime.
1849 __ jmp(&call_runtime);
1850 }
1851 __ bind(&not_floats);
1852 break;
1853 }
1854 case Token::MOD: {
1855 // For MOD we go directly to runtime in the non-smi case.
1856 break;
1857 }
1858 case Token::BIT_OR:
1859 case Token::BIT_AND:
1860 case Token::BIT_XOR:
1861 case Token::SAR:
1862 case Token::SHL:
1863 case Token::SHR: {
1864 Label non_smi_result;
1865 bool use_sse3 = platform_specific_bit_;
1866 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1867 use_sse3,
1868 BinaryOpIC::GENERIC,
1869 BinaryOpIC::GENERIC,
1870 &call_runtime);
1871 switch (op_) {
1872 case Token::BIT_OR: __ or_(eax, ecx); break;
1873 case Token::BIT_AND: __ and_(eax, ecx); break;
1874 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1875 case Token::SAR: __ sar_cl(eax); break;
1876 case Token::SHL: __ shl_cl(eax); break;
1877 case Token::SHR: __ shr_cl(eax); break;
1878 default: UNREACHABLE();
1879 }
1880 if (op_ == Token::SHR) {
1881 // Check if result is non-negative and fits in a smi.
1882 __ test(eax, Immediate(0xc0000000));
1883 __ j(not_zero, &call_runtime);
1884 } else {
1885 // Check if result fits in a smi.
1886 __ cmp(eax, 0xc0000000);
1887 __ j(negative, &non_smi_result, Label::kNear);
1888 }
1889 // Tag smi result and return.
1890 __ SmiTag(eax);
1891 __ ret(2 * kPointerSize); // Drop the arguments from the stack.
1892
1893 // All ops except SHR return a signed int32 that we load in
1894 // a HeapNumber.
1895 if (op_ != Token::SHR) {
1896 __ bind(&non_smi_result);
1897 // Allocate a heap number if needed.
1898 __ mov(ebx, eax); // ebx: result
1899 Label skip_allocation;
1900 switch (mode_) {
1901 case OVERWRITE_LEFT:
1902 case OVERWRITE_RIGHT:
1903 // If the operand was an object, we skip the
1904 // allocation of a heap number.
1905 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1906 1 * kPointerSize : 2 * kPointerSize));
1907 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1908 // Fall through!
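// (If the overwritable operand was a smi there is no heap number to
//  reuse, so execution falls through and allocates a fresh one.)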
1909 case NO_OVERWRITE: 1910 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1911 __ bind(&skip_allocation); 1912 break; 1913 default: UNREACHABLE(); 1914 } 1915 // Store the result in the HeapNumber and return. 1916 if (CpuFeatures::IsSupported(SSE2)) { 1917 CpuFeatureScope use_sse2(masm, SSE2); 1918 __ cvtsi2sd(xmm0, ebx); 1919 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1920 } else { 1921 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1922 __ fild_s(Operand(esp, 1 * kPointerSize)); 1923 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1924 } 1925 __ ret(2 * kPointerSize); 1926 } 1927 break; 1928 } 1929 default: UNREACHABLE(); break; 1930 } 1931 1932 // If all else fails, use the runtime system to get the correct 1933 // result. 1934 __ bind(&call_runtime); 1935 switch (op_) { 1936 case Token::ADD: 1937 GenerateAddStrings(masm); 1938 // Fall through. 1939 case Token::SUB: 1940 case Token::MUL: 1941 case Token::DIV: 1942 break; 1943 case Token::MOD: 1944 case Token::BIT_OR: 1945 case Token::BIT_AND: 1946 case Token::BIT_XOR: 1947 case Token::SAR: 1948 case Token::SHL: 1949 case Token::SHR: 1950 BinaryOpStub_GenerateRegisterArgsPop(masm); 1951 break; 1952 default: 1953 UNREACHABLE(); 1954 } 1955 1956 { 1957 FrameScope scope(masm, StackFrame::INTERNAL); 1958 __ push(edx); 1959 __ push(eax); 1960 GenerateCallRuntime(masm); 1961 } 1962 __ ret(0); 1963 } 1964 1965 1966 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { 1967 ASSERT(op_ == Token::ADD); 1968 Label left_not_string, call_runtime; 1969 1970 // Registers containing left and right operands respectively. 1971 Register left = edx; 1972 Register right = eax; 1973 1974 // Test if left operand is a string. 1975 __ JumpIfSmi(left, &left_not_string, Label::kNear); 1976 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx); 1977 __ j(above_equal, &left_not_string, Label::kNear); 1978 1979 StringAddStub string_add_left_stub( 1980 (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); 1981 GenerateRegisterArgsPush(masm); 1982 __ TailCallStub(&string_add_left_stub); 1983 1984 // Left operand is not a string, test right. 1985 __ bind(&left_not_string); 1986 __ JumpIfSmi(right, &call_runtime, Label::kNear); 1987 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx); 1988 __ j(above_equal, &call_runtime, Label::kNear); 1989 1990 StringAddStub string_add_right_stub( 1991 (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); 1992 GenerateRegisterArgsPush(masm); 1993 __ TailCallStub(&string_add_right_stub); 1994 1995 // Neither argument is a string. 1996 __ bind(&call_runtime); 1997 } 1998 1999 2000 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, 2001 Label* alloc_failure, 2002 OverwriteMode mode) { 2003 Label skip_allocation; 2004 switch (mode) { 2005 case OVERWRITE_LEFT: { 2006 // If the argument in edx is already an object, we skip the 2007 // allocation of a heap number. 2008 __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear); 2009 // Allocate a heap number for the result. Keep eax and edx intact 2010 // for the possible runtime call. 2011 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); 2012 // Now edx can be overwritten losing one of the arguments as we are 2013 // now done and will not need it any more. 
2014 __ mov(edx, ebx);
2015 __ bind(&skip_allocation);
2016 // Use the object in edx as the result holder.
2017 __ mov(eax, edx);
2018 break;
2019 }
2020 case OVERWRITE_RIGHT:
2021 // If the argument in eax is already an object, we skip the
2022 // allocation of a heap number.
2023 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2024 // Fall through!
2025 case NO_OVERWRITE:
2026 // Allocate a heap number for the result. Keep eax and edx intact
2027 // for the possible runtime call.
2028 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2029 // Now eax can be overwritten losing one of the arguments as we are
2030 // now done and will not need it any more.
2031 __ mov(eax, ebx);
2032 __ bind(&skip_allocation);
2033 break;
2034 default: UNREACHABLE();
2035 }
2036 }
2037
2038
2039 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2040 __ pop(ecx);
2041 __ push(edx);
2042 __ push(eax);
2043 __ push(ecx);
2044 }
2045
2046
2047 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2048 // TAGGED case:
2049 // Input:
2050 // esp[4]: tagged number input argument (should be number).
2051 // esp[0]: return address.
2052 // Output:
2053 // eax: tagged double result.
2054 // UNTAGGED case:
2055 // Input:
2056 // esp[0]: return address.
2057 // xmm1: untagged double input argument
2058 // Output:
2059 // xmm1: untagged double result.
2060
2061 Label runtime_call;
2062 Label runtime_call_clear_stack;
2063 Label skip_cache;
2064 const bool tagged = (argument_type_ == TAGGED);
2065 if (tagged) {
2066 // Test that eax is a number.
2067 Label input_not_smi;
2068 Label loaded;
2069 __ mov(eax, Operand(esp, kPointerSize));
2070 __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
2071 // Input is a smi. Untag and load it onto the FPU stack.
2072 // Then load the low and high words of the double into ebx, edx.
2073 STATIC_ASSERT(kSmiTagSize == 1);
2074 __ sar(eax, 1);
2075 __ sub(esp, Immediate(2 * kPointerSize));
2076 __ mov(Operand(esp, 0), eax);
2077 __ fild_s(Operand(esp, 0));
2078 __ fst_d(Operand(esp, 0));
2079 __ pop(edx);
2080 __ pop(ebx);
2081 __ jmp(&loaded, Label::kNear);
2082 __ bind(&input_not_smi);
2083 // Check if input is a HeapNumber.
2084 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2085 Factory* factory = masm->isolate()->factory();
2086 __ cmp(ebx, Immediate(factory->heap_number_map()));
2087 __ j(not_equal, &runtime_call);
2088 // Input is a HeapNumber. Push it on the FPU stack and load its
2089 // low and high words into ebx, edx.
2090 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2091 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2092 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2093
2094 __ bind(&loaded);
2095 } else { // UNTAGGED.
2096 CpuFeatureScope scope(masm, SSE2);
2097 if (CpuFeatures::IsSupported(SSE4_1)) {
2098 CpuFeatureScope sse4_scope(masm, SSE4_1);
2099 __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
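// (Without SSE4.1 the same high word is extracted below by shuffling
//  the upper half of xmm1 into xmm0 and moving it to edx.)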
2100 } else { 2101 __ pshufd(xmm0, xmm1, 0x1); 2102 __ movd(edx, xmm0); 2103 } 2104 __ movd(ebx, xmm1); 2105 } 2106 2107 // ST[0] or xmm1 == double value 2108 // ebx = low 32 bits of double value 2109 // edx = high 32 bits of double value 2110 // Compute hash (the shifts are arithmetic): 2111 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); 2112 __ mov(ecx, ebx); 2113 __ xor_(ecx, edx); 2114 __ mov(eax, ecx); 2115 __ sar(eax, 16); 2116 __ xor_(ecx, eax); 2117 __ mov(eax, ecx); 2118 __ sar(eax, 8); 2119 __ xor_(ecx, eax); 2120 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); 2121 __ and_(ecx, 2122 Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); 2123 2124 // ST[0] or xmm1 == double value. 2125 // ebx = low 32 bits of double value. 2126 // edx = high 32 bits of double value. 2127 // ecx = TranscendentalCache::hash(double value). 2128 ExternalReference cache_array = 2129 ExternalReference::transcendental_cache_array_address(masm->isolate()); 2130 __ mov(eax, Immediate(cache_array)); 2131 int cache_array_index = 2132 type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]); 2133 __ mov(eax, Operand(eax, cache_array_index)); 2134 // Eax points to the cache for the type type_. 2135 // If NULL, the cache hasn't been initialized yet, so go through runtime. 2136 __ test(eax, eax); 2137 __ j(zero, &runtime_call_clear_stack); 2138 #ifdef DEBUG 2139 // Check that the layout of cache elements match expectations. 2140 { TranscendentalCache::SubCache::Element test_elem[2]; 2141 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); 2142 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); 2143 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); 2144 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); 2145 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); 2146 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. 2147 CHECK_EQ(0, elem_in0 - elem_start); 2148 CHECK_EQ(kIntSize, elem_in1 - elem_start); 2149 CHECK_EQ(2 * kIntSize, elem_out - elem_start); 2150 } 2151 #endif 2152 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. 2153 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); 2154 __ lea(ecx, Operand(eax, ecx, times_4, 0)); 2155 // Check if cache matches: Double value is stored in uint32_t[2] array. 2156 Label cache_miss; 2157 __ cmp(ebx, Operand(ecx, 0)); 2158 __ j(not_equal, &cache_miss, Label::kNear); 2159 __ cmp(edx, Operand(ecx, kIntSize)); 2160 __ j(not_equal, &cache_miss, Label::kNear); 2161 // Cache hit! 2162 Counters* counters = masm->isolate()->counters(); 2163 __ IncrementCounter(counters->transcendental_cache_hit(), 1); 2164 __ mov(eax, Operand(ecx, 2 * kIntSize)); 2165 if (tagged) { 2166 __ fstp(0); 2167 __ ret(kPointerSize); 2168 } else { // UNTAGGED. 2169 CpuFeatureScope scope(masm, SSE2); 2170 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2171 __ Ret(); 2172 } 2173 2174 __ bind(&cache_miss); 2175 __ IncrementCounter(counters->transcendental_cache_miss(), 1); 2176 // Update cache with new value. 2177 // We are short on registers, so use no_reg as scratch. 2178 // This gives slightly larger code. 2179 if (tagged) { 2180 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); 2181 } else { // UNTAGGED. 
2182 CpuFeatureScope scope(masm, SSE2); 2183 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); 2184 __ sub(esp, Immediate(kDoubleSize)); 2185 __ movdbl(Operand(esp, 0), xmm1); 2186 __ fld_d(Operand(esp, 0)); 2187 __ add(esp, Immediate(kDoubleSize)); 2188 } 2189 GenerateOperation(masm, type_); 2190 __ mov(Operand(ecx, 0), ebx); 2191 __ mov(Operand(ecx, kIntSize), edx); 2192 __ mov(Operand(ecx, 2 * kIntSize), eax); 2193 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 2194 if (tagged) { 2195 __ ret(kPointerSize); 2196 } else { // UNTAGGED. 2197 CpuFeatureScope scope(masm, SSE2); 2198 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2199 __ Ret(); 2200 2201 // Skip cache and return answer directly, only in untagged case. 2202 __ bind(&skip_cache); 2203 __ sub(esp, Immediate(kDoubleSize)); 2204 __ movdbl(Operand(esp, 0), xmm1); 2205 __ fld_d(Operand(esp, 0)); 2206 GenerateOperation(masm, type_); 2207 __ fstp_d(Operand(esp, 0)); 2208 __ movdbl(xmm1, Operand(esp, 0)); 2209 __ add(esp, Immediate(kDoubleSize)); 2210 // We return the value in xmm1 without adding it to the cache, but 2211 // we cause a scavenging GC so that future allocations will succeed. 2212 { 2213 FrameScope scope(masm, StackFrame::INTERNAL); 2214 // Allocate an unused object bigger than a HeapNumber. 2215 __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); 2216 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 2217 } 2218 __ Ret(); 2219 } 2220 2221 // Call runtime, doing whatever allocation and cleanup is necessary. 2222 if (tagged) { 2223 __ bind(&runtime_call_clear_stack); 2224 __ fstp(0); 2225 __ bind(&runtime_call); 2226 ExternalReference runtime = 2227 ExternalReference(RuntimeFunction(), masm->isolate()); 2228 __ TailCallExternalReference(runtime, 1, 1); 2229 } else { // UNTAGGED. 2230 CpuFeatureScope scope(masm, SSE2); 2231 __ bind(&runtime_call_clear_stack); 2232 __ bind(&runtime_call); 2233 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); 2234 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); 2235 { 2236 FrameScope scope(masm, StackFrame::INTERNAL); 2237 __ push(eax); 2238 __ CallRuntime(RuntimeFunction(), 1); 2239 } 2240 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2241 __ Ret(); 2242 } 2243 } 2244 2245 2246 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 2247 switch (type_) { 2248 case TranscendentalCache::SIN: return Runtime::kMath_sin; 2249 case TranscendentalCache::COS: return Runtime::kMath_cos; 2250 case TranscendentalCache::TAN: return Runtime::kMath_tan; 2251 case TranscendentalCache::LOG: return Runtime::kMath_log; 2252 default: 2253 UNIMPLEMENTED(); 2254 return Runtime::kAbort; 2255 } 2256 } 2257 2258 2259 void TranscendentalCacheStub::GenerateOperation( 2260 MacroAssembler* masm, TranscendentalCache::Type type) { 2261 // Only free register is edi. 2262 // Input value is on FP stack, and also in ebx/edx. 2263 // Input value is possibly in xmm1. 2264 // Address of result (a newly allocated HeapNumber) may be in eax. 2265 if (type == TranscendentalCache::SIN || 2266 type == TranscendentalCache::COS || 2267 type == TranscendentalCache::TAN) { 2268 // Both fsin and fcos require arguments in the range +/-2^63 and 2269 // return NaN for infinities and NaN. They can share all code except 2270 // the actual fsin/fcos operation. 2271 Label in_range, done; 2272 // If argument is outside the range -2^63..2^63, fsin/cos doesn't 2273 // work. We must reduce it to the appropriate range. 
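// A C-level sketch of the range check performed below on the high word
// of the double (kExponentBias is 1023, kExponentShift is 20):
//   uint32_t exp_bits = high_word & 0x7ff00000;        // biased exponent
//   bool in_range = exp_bits < ((63 + 1023) << 20);    // |x| < 2^63
//   bool nan_or_inf = (exp_bits == 0x7ff00000);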
2274 __ mov(edi, edx); 2275 __ and_(edi, Immediate(0x7ff00000)); // Exponent only. 2276 int supported_exponent_limit = 2277 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; 2278 __ cmp(edi, Immediate(supported_exponent_limit)); 2279 __ j(below, &in_range, Label::kNear); 2280 // Check for infinity and NaN. Both return NaN for sin. 2281 __ cmp(edi, Immediate(0x7ff00000)); 2282 Label non_nan_result; 2283 __ j(not_equal, &non_nan_result, Label::kNear); 2284 // Input is +/-Infinity or NaN. Result is NaN. 2285 __ fstp(0); 2286 // NaN is represented by 0x7ff8000000000000. 2287 __ push(Immediate(0x7ff80000)); 2288 __ push(Immediate(0)); 2289 __ fld_d(Operand(esp, 0)); 2290 __ add(esp, Immediate(2 * kPointerSize)); 2291 __ jmp(&done, Label::kNear); 2292 2293 __ bind(&non_nan_result); 2294 2295 // Use fpmod to restrict argument to the range +/-2*PI. 2296 __ mov(edi, eax); // Save eax before using fnstsw_ax. 2297 __ fldpi(); 2298 __ fadd(0); 2299 __ fld(1); 2300 // FPU Stack: input, 2*pi, input. 2301 { 2302 Label no_exceptions; 2303 __ fwait(); 2304 __ fnstsw_ax(); 2305 // Clear if Illegal Operand or Zero Division exceptions are set. 2306 __ test(eax, Immediate(5)); 2307 __ j(zero, &no_exceptions, Label::kNear); 2308 __ fnclex(); 2309 __ bind(&no_exceptions); 2310 } 2311 2312 // Compute st(0) % st(1) 2313 { 2314 Label partial_remainder_loop; 2315 __ bind(&partial_remainder_loop); 2316 __ fprem1(); 2317 __ fwait(); 2318 __ fnstsw_ax(); 2319 __ test(eax, Immediate(0x400 /* C2 */)); 2320 // If C2 is set, computation only has partial result. Loop to 2321 // continue computation. 2322 __ j(not_zero, &partial_remainder_loop); 2323 } 2324 // FPU Stack: input, 2*pi, input % 2*pi 2325 __ fstp(2); 2326 __ fstp(0); 2327 __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer). 2328 2329 // FPU Stack: input % 2*pi 2330 __ bind(&in_range); 2331 switch (type) { 2332 case TranscendentalCache::SIN: 2333 __ fsin(); 2334 break; 2335 case TranscendentalCache::COS: 2336 __ fcos(); 2337 break; 2338 case TranscendentalCache::TAN: 2339 // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the 2340 // FP register stack. 2341 __ fptan(); 2342 __ fstp(0); // Pop FP register stack. 2343 break; 2344 default: 2345 UNREACHABLE(); 2346 } 2347 __ bind(&done); 2348 } else { 2349 ASSERT(type == TranscendentalCache::LOG); 2350 __ fldln2(); 2351 __ fxch(); 2352 __ fyl2x(); 2353 } 2354 } 2355 2356 2357 // Input: edx, eax are the left and right objects of a bit op. 2358 // Output: eax, ecx are left and right integers for a bit op. 2359 // Warning: can clobber inputs even when it jumps to |conversion_failure|! 2360 void FloatingPointHelper::LoadUnknownsAsIntegers( 2361 MacroAssembler* masm, 2362 bool use_sse3, 2363 BinaryOpIC::TypeInfo left_type, 2364 BinaryOpIC::TypeInfo right_type, 2365 Label* conversion_failure) { 2366 // Check float operands. 2367 Label arg1_is_object, check_undefined_arg1; 2368 Label arg2_is_object, check_undefined_arg2; 2369 Label load_arg2, done; 2370 2371 // Test if arg1 is a Smi. 2372 if (left_type == BinaryOpIC::SMI) { 2373 __ JumpIfNotSmi(edx, conversion_failure); 2374 } else { 2375 __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear); 2376 } 2377 2378 __ SmiUntag(edx); 2379 __ jmp(&load_arg2); 2380 2381 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 
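// (For example, in JS: (undefined | 0) === 0, because ToNumber(undefined)
//  is NaN and ToInt32(NaN) is 0.)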
2382 __ bind(&check_undefined_arg1); 2383 Factory* factory = masm->isolate()->factory(); 2384 __ cmp(edx, factory->undefined_value()); 2385 __ j(not_equal, conversion_failure); 2386 __ mov(edx, Immediate(0)); 2387 __ jmp(&load_arg2); 2388 2389 __ bind(&arg1_is_object); 2390 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); 2391 __ cmp(ebx, factory->heap_number_map()); 2392 __ j(not_equal, &check_undefined_arg1); 2393 2394 // Get the untagged integer version of the edx heap number in ecx. 2395 if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { 2396 CpuFeatureScope use_sse2(masm, SSE2); 2397 ConvertHeapNumberToInt32(masm, edx, conversion_failure); 2398 } else { 2399 DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag, 2400 true); 2401 __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); 2402 } 2403 __ mov(edx, ecx); 2404 2405 // Here edx has the untagged integer, eax has a Smi or a heap number. 2406 __ bind(&load_arg2); 2407 2408 // Test if arg2 is a Smi. 2409 if (right_type == BinaryOpIC::SMI) { 2410 __ JumpIfNotSmi(eax, conversion_failure); 2411 } else { 2412 __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear); 2413 } 2414 2415 __ SmiUntag(eax); 2416 __ mov(ecx, eax); 2417 __ jmp(&done); 2418 2419 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 2420 __ bind(&check_undefined_arg2); 2421 __ cmp(eax, factory->undefined_value()); 2422 __ j(not_equal, conversion_failure); 2423 __ mov(ecx, Immediate(0)); 2424 __ jmp(&done); 2425 2426 __ bind(&arg2_is_object); 2427 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2428 __ cmp(ebx, factory->heap_number_map()); 2429 __ j(not_equal, &check_undefined_arg2); 2430 // Get the untagged integer version of the eax heap number in ecx. 2431 2432 if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { 2433 CpuFeatureScope use_sse2(masm, SSE2); 2434 ConvertHeapNumberToInt32(masm, eax, conversion_failure); 2435 } else { 2436 DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag, 2437 true); 2438 __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); 2439 } 2440 2441 __ bind(&done); 2442 __ mov(eax, edx); 2443 } 2444 2445 2446 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 2447 Register number) { 2448 Label load_smi, done; 2449 2450 __ JumpIfSmi(number, &load_smi, Label::kNear); 2451 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); 2452 __ jmp(&done, Label::kNear); 2453 2454 __ bind(&load_smi); 2455 __ SmiUntag(number); 2456 __ push(number); 2457 __ fild_s(Operand(esp, 0)); 2458 __ pop(number); 2459 2460 __ bind(&done); 2461 } 2462 2463 2464 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { 2465 Label load_smi_edx, load_eax, load_smi_eax, done; 2466 // Load operand in edx into xmm0. 2467 __ JumpIfSmi(edx, &load_smi_edx, Label::kNear); 2468 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2469 2470 __ bind(&load_eax); 2471 // Load operand in eax into xmm1. 2472 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear); 2473 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2474 __ jmp(&done, Label::kNear); 2475 2476 __ bind(&load_smi_edx); 2477 __ SmiUntag(edx); // Untag smi before converting to float. 2478 __ cvtsi2sd(xmm0, edx); 2479 __ SmiTag(edx); // Retag smi for heap number overwriting test. 2480 __ jmp(&load_eax); 2481 2482 __ bind(&load_smi_eax); 2483 __ SmiUntag(eax); // Untag smi before converting to float. 
2484 __ cvtsi2sd(xmm1, eax); 2485 __ SmiTag(eax); // Retag smi for heap number overwriting test. 2486 2487 __ bind(&done); 2488 } 2489 2490 2491 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, 2492 Label* not_numbers) { 2493 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; 2494 // Load operand in edx into xmm0, or branch to not_numbers. 2495 __ JumpIfSmi(edx, &load_smi_edx, Label::kNear); 2496 Factory* factory = masm->isolate()->factory(); 2497 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map()); 2498 __ j(not_equal, not_numbers); // Argument in edx is not a number. 2499 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2500 __ bind(&load_eax); 2501 // Load operand in eax into xmm1, or branch to not_numbers. 2502 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear); 2503 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map()); 2504 __ j(equal, &load_float_eax, Label::kNear); 2505 __ jmp(not_numbers); // Argument in eax is not a number. 2506 __ bind(&load_smi_edx); 2507 __ SmiUntag(edx); // Untag smi before converting to float. 2508 __ cvtsi2sd(xmm0, edx); 2509 __ SmiTag(edx); // Retag smi for heap number overwriting test. 2510 __ jmp(&load_eax); 2511 __ bind(&load_smi_eax); 2512 __ SmiUntag(eax); // Untag smi before converting to float. 2513 __ cvtsi2sd(xmm1, eax); 2514 __ SmiTag(eax); // Retag smi for heap number overwriting test. 2515 __ jmp(&done, Label::kNear); 2516 __ bind(&load_float_eax); 2517 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2518 __ bind(&done); 2519 } 2520 2521 2522 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, 2523 Register scratch) { 2524 const Register left = edx; 2525 const Register right = eax; 2526 __ mov(scratch, left); 2527 ASSERT(!scratch.is(right)); // We're about to clobber scratch. 2528 __ SmiUntag(scratch); 2529 __ cvtsi2sd(xmm0, scratch); 2530 2531 __ mov(scratch, right); 2532 __ SmiUntag(scratch); 2533 __ cvtsi2sd(xmm1, scratch); 2534 } 2535 2536 2537 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, 2538 Label* non_int32, 2539 XMMRegister operand, 2540 Register int32_result, 2541 Register scratch, 2542 XMMRegister xmm_scratch) { 2543 __ cvttsd2si(int32_result, Operand(operand)); 2544 __ cvtsi2sd(xmm_scratch, int32_result); 2545 __ pcmpeqd(xmm_scratch, operand); 2546 __ movmskps(scratch, xmm_scratch); 2547 // Two least significant bits should be both set. 
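// A C-level sketch of the round-trip check performed above:
//   int32_t i = (int32_t)value;                    // cvttsd2si
//   bool is_int32 = BitwiseEqual((double)i, value);
// pcmpeqd sets each equal dword to all ones, and movmskps collects the
// four dword sign bits, so the two bits covering the double must be set.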
2548 __ not_(scratch); 2549 __ test(scratch, Immediate(3)); 2550 __ j(not_zero, non_int32); 2551 } 2552 2553 2554 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 2555 Register scratch, 2556 ArgLocation arg_location) { 2557 Label load_smi_1, load_smi_2, done_load_1, done; 2558 if (arg_location == ARGS_IN_REGISTERS) { 2559 __ mov(scratch, edx); 2560 } else { 2561 __ mov(scratch, Operand(esp, 2 * kPointerSize)); 2562 } 2563 __ JumpIfSmi(scratch, &load_smi_1, Label::kNear); 2564 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); 2565 __ bind(&done_load_1); 2566 2567 if (arg_location == ARGS_IN_REGISTERS) { 2568 __ mov(scratch, eax); 2569 } else { 2570 __ mov(scratch, Operand(esp, 1 * kPointerSize)); 2571 } 2572 __ JumpIfSmi(scratch, &load_smi_2, Label::kNear); 2573 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); 2574 __ jmp(&done, Label::kNear); 2575 2576 __ bind(&load_smi_1); 2577 __ SmiUntag(scratch); 2578 __ push(scratch); 2579 __ fild_s(Operand(esp, 0)); 2580 __ pop(scratch); 2581 __ jmp(&done_load_1); 2582 2583 __ bind(&load_smi_2); 2584 __ SmiUntag(scratch); 2585 __ push(scratch); 2586 __ fild_s(Operand(esp, 0)); 2587 __ pop(scratch); 2588 2589 __ bind(&done); 2590 } 2591 2592 2593 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, 2594 Register scratch) { 2595 const Register left = edx; 2596 const Register right = eax; 2597 __ mov(scratch, left); 2598 ASSERT(!scratch.is(right)); // We're about to clobber scratch. 2599 __ SmiUntag(scratch); 2600 __ push(scratch); 2601 __ fild_s(Operand(esp, 0)); 2602 2603 __ mov(scratch, right); 2604 __ SmiUntag(scratch); 2605 __ mov(Operand(esp, 0), scratch); 2606 __ fild_s(Operand(esp, 0)); 2607 __ pop(scratch); 2608 } 2609 2610 2611 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, 2612 Label* non_float, 2613 Register scratch) { 2614 Label test_other, done; 2615 // Test if both operands are floats or smi -> scratch=k_is_float; 2616 // Otherwise scratch = k_not_float. 2617 __ JumpIfSmi(edx, &test_other, Label::kNear); 2618 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); 2619 Factory* factory = masm->isolate()->factory(); 2620 __ cmp(scratch, factory->heap_number_map()); 2621 __ j(not_equal, non_float); // argument in edx is not a number -> NaN 2622 2623 __ bind(&test_other); 2624 __ JumpIfSmi(eax, &done, Label::kNear); 2625 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); 2626 __ cmp(scratch, factory->heap_number_map()); 2627 __ j(not_equal, non_float); // argument in eax is not a number -> NaN 2628 2629 // Fall-through: Both operands are numbers. 2630 __ bind(&done); 2631 } 2632 2633 2634 void MathPowStub::Generate(MacroAssembler* masm) { 2635 CpuFeatureScope use_sse2(masm, SSE2); 2636 Factory* factory = masm->isolate()->factory(); 2637 const Register exponent = eax; 2638 const Register base = edx; 2639 const Register scratch = ecx; 2640 const XMMRegister double_result = xmm3; 2641 const XMMRegister double_base = xmm2; 2642 const XMMRegister double_exponent = xmm1; 2643 const XMMRegister double_scratch = xmm4; 2644 2645 Label call_runtime, done, exponent_not_smi, int_exponent; 2646 2647 // Save 1 in double_result - we need this several times later on. 2648 __ mov(scratch, Immediate(1)); 2649 __ cvtsi2sd(double_result, scratch); 2650 2651 if (exponent_type_ == ON_STACK) { 2652 Label base_is_smi, unpack_exponent; 2653 // The exponent and base are supplied as arguments on the stack. 2654 // This can only happen if the stub is called from non-optimized code. 
2655 // Load input parameters from stack.
2656 __ mov(base, Operand(esp, 2 * kPointerSize));
2657 __ mov(exponent, Operand(esp, 1 * kPointerSize));
2658
2659 __ JumpIfSmi(base, &base_is_smi, Label::kNear);
2660 __ cmp(FieldOperand(base, HeapObject::kMapOffset),
2661 factory->heap_number_map());
2662 __ j(not_equal, &call_runtime);
2663
2664 __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
2665 __ jmp(&unpack_exponent, Label::kNear);
2666
2667 __ bind(&base_is_smi);
2668 __ SmiUntag(base);
2669 __ cvtsi2sd(double_base, base);
2670
2671 __ bind(&unpack_exponent);
2672 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2673 __ SmiUntag(exponent);
2674 __ jmp(&int_exponent);
2675
2676 __ bind(&exponent_not_smi);
2677 __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
2678 factory->heap_number_map());
2679 __ j(not_equal, &call_runtime);
2680 __ movdbl(double_exponent,
2681 FieldOperand(exponent, HeapNumber::kValueOffset));
2682 } else if (exponent_type_ == TAGGED) {
2683 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2684 __ SmiUntag(exponent);
2685 __ jmp(&int_exponent);
2686
2687 __ bind(&exponent_not_smi);
2688 __ movdbl(double_exponent,
2689 FieldOperand(exponent, HeapNumber::kValueOffset));
2690 }
2691
2692 if (exponent_type_ != INTEGER) {
2693 Label fast_power;
2694 // Detect integer exponents stored as double.
2695 __ cvttsd2si(exponent, Operand(double_exponent));
2696 // Skip to runtime if possibly NaN (indicated by the indefinite integer).
2697 __ cmp(exponent, Immediate(0x80000000u));
2698 __ j(equal, &call_runtime);
2699 __ cvtsi2sd(double_scratch, exponent);
2700 // Already ruled out NaNs for exponent.
2701 __ ucomisd(double_exponent, double_scratch);
2702 __ j(equal, &int_exponent);
2703
2704 if (exponent_type_ == ON_STACK) {
2705 // Detect square root case. Crankshaft detects constant +/-0.5 at
2706 // compile time and uses DoMathPowHalf instead. We then skip this check
2707 // for non-constant cases of +/-0.5 as these hardly occur.
2708 Label continue_sqrt, continue_rsqrt, not_plus_half;
2709 // Test for 0.5.
2710 // Load double_scratch with 0.5.
2711 __ mov(scratch, Immediate(0x3F000000u));
2712 __ movd(double_scratch, scratch);
2713 __ cvtss2sd(double_scratch, double_scratch);
2714 // Already ruled out NaNs for exponent.
2715 __ ucomisd(double_scratch, double_exponent);
2716 __ j(not_equal, &not_plus_half, Label::kNear);
2717
2718 // Calculates square root of base. Check for the special case of
2719 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
2720 // According to IEEE-754, single-precision -Infinity has the highest
2721 // 9 bits set and the lowest 23 bits cleared.
2722 __ mov(scratch, 0xFF800000u);
2723 __ movd(double_scratch, scratch);
2724 __ cvtss2sd(double_scratch, double_scratch);
2725 __ ucomisd(double_base, double_scratch);
2726 // Comparing -Infinity with NaN results in "unordered", which sets the
2727 // zero flag as if both were equal. However, it also sets the carry flag.
2728 __ j(not_equal, &continue_sqrt, Label::kNear);
2729 __ j(carry, &continue_sqrt, Label::kNear);
2730
2731 // Set result to Infinity in the special case.
2732 __ xorps(double_result, double_result);
2733 __ subsd(double_result, double_scratch);
2734 __ jmp(&done);
2735
2736 __ bind(&continue_sqrt);
2737 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2738 __ xorps(double_scratch, double_scratch);
2739 __ addsd(double_scratch, double_base); // Convert -0 to +0.
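// (In the default round-to-nearest mode, -0.0 + +0.0 == +0.0, so the
//  sqrtsd below sees +0 and produces +0 as the spec requires.)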
2740 __ sqrtsd(double_result, double_scratch);
2741 __ jmp(&done);
2742
2743 // Test for -0.5.
2744 __ bind(&not_plus_half);
2745 // Load double_exponent with -0.5 by subtracting 1.
2746 __ subsd(double_scratch, double_result);
2747 // Already ruled out NaNs for exponent.
2748 __ ucomisd(double_scratch, double_exponent);
2749 __ j(not_equal, &fast_power, Label::kNear);
2750
2751 // Calculates reciprocal of square root of base. Check for the special
2752 // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
2753 // According to IEEE-754, single-precision -Infinity has the highest
2754 // 9 bits set and the lowest 23 bits cleared.
2755 __ mov(scratch, 0xFF800000u);
2756 __ movd(double_scratch, scratch);
2757 __ cvtss2sd(double_scratch, double_scratch);
2758 __ ucomisd(double_base, double_scratch);
2759 // Comparing -Infinity with NaN results in "unordered", which sets the
2760 // zero flag as if both were equal. However, it also sets the carry flag.
2761 __ j(not_equal, &continue_rsqrt, Label::kNear);
2762 __ j(carry, &continue_rsqrt, Label::kNear);
2763
2764 // Set result to 0 in the special case.
2765 __ xorps(double_result, double_result);
2766 __ jmp(&done);
2767
2768 __ bind(&continue_rsqrt);
2769 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2770 __ xorps(double_exponent, double_exponent);
2771 __ addsd(double_exponent, double_base); // Convert -0 to +0.
2772 __ sqrtsd(double_exponent, double_exponent);
2773 __ divsd(double_result, double_exponent);
2774 __ jmp(&done);
2775 }
2776
2777 // Using FPU instructions to calculate power.
2778 Label fast_power_failed;
2779 __ bind(&fast_power);
2780 __ fnclex(); // Clear flags to catch exceptions later.
2781 // Transfer (B)ase and (E)xponent onto the FPU register stack.
2782 __ sub(esp, Immediate(kDoubleSize));
2783 __ movdbl(Operand(esp, 0), double_exponent);
2784 __ fld_d(Operand(esp, 0)); // E
2785 __ movdbl(Operand(esp, 0), double_base);
2786 __ fld_d(Operand(esp, 0)); // B, E
2787
2788 // Exponent is in st(1) and base is in st(0)
2789 // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
2790 // FYL2X calculates st(1) * log2(st(0))
2791 __ fyl2x(); // X
2792 __ fld(0); // X, X
2793 __ frndint(); // rnd(X), X
2794 __ fsub(1); // rnd(X), X-rnd(X)
2795 __ fxch(1); // X - rnd(X), rnd(X)
2796 // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
2797 __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
2798 __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
2799 __ faddp(1); // 2^(X-rnd(X)), rnd(X)
2800 // FSCALE calculates st(0) * 2^st(1)
2801 __ fscale(); // 2^X, rnd(X)
2802 __ fstp(1); // 2^X
2803 // Bail out to runtime in case of exceptions in the status word.
2804 __ fnstsw_ax();
2805 __ test_b(eax, 0x5F); // We check for all but precision exception.
2806 __ j(not_zero, &fast_power_failed, Label::kNear);
2807 __ fstp_d(Operand(esp, 0));
2808 __ movdbl(double_result, Operand(esp, 0));
2809 __ add(esp, Immediate(kDoubleSize));
2810 __ jmp(&done);
2811
2812 __ bind(&fast_power_failed);
2813 __ fninit();
2814 __ add(esp, Immediate(kDoubleSize));
2815 __ jmp(&call_runtime);
2816 }
2817
2818 // Calculate power with integer exponent.
2819 __ bind(&int_exponent);
2820 const XMMRegister double_scratch2 = double_exponent;
2821 __ mov(scratch, exponent); // Back up exponent.
2822 __ movsd(double_scratch, double_base); // Back up base.
2823 __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
2824
2825 // Get absolute value of exponent.
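// The loop below is binary exponentiation (square-and-multiply). A rough
// C-level sketch, ignoring the subnormal corner cases handled afterwards:
//   double r = 1.0, b = base;
//   for (unsigned n = abs(exponent); n != 0; n >>= 1) {
//     if (n & 1) r *= b;
//     b *= b;
//   }
//   if (exponent < 0) r = 1.0 / r;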
2826 Label no_neg, while_true, while_false; 2827 __ test(scratch, scratch); 2828 __ j(positive, &no_neg, Label::kNear); 2829 __ neg(scratch); 2830 __ bind(&no_neg); 2831 2832 __ j(zero, &while_false, Label::kNear); 2833 __ shr(scratch, 1); 2834 // Above condition means CF==0 && ZF==0. This means that the 2835 // bit that has been shifted out is 0 and the result is not 0. 2836 __ j(above, &while_true, Label::kNear); 2837 __ movsd(double_result, double_scratch); 2838 __ j(zero, &while_false, Label::kNear); 2839 2840 __ bind(&while_true); 2841 __ shr(scratch, 1); 2842 __ mulsd(double_scratch, double_scratch); 2843 __ j(above, &while_true, Label::kNear); 2844 __ mulsd(double_result, double_scratch); 2845 __ j(not_zero, &while_true); 2846 2847 __ bind(&while_false); 2848 // scratch has the original value of the exponent - if the exponent is 2849 // negative, return 1/result. 2850 __ test(exponent, exponent); 2851 __ j(positive, &done); 2852 __ divsd(double_scratch2, double_result); 2853 __ movsd(double_result, double_scratch2); 2854 // Test whether result is zero. Bail out to check for subnormal result. 2855 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. 2856 __ xorps(double_scratch2, double_scratch2); 2857 __ ucomisd(double_scratch2, double_result); // Result cannot be NaN. 2858 // double_exponent aliased as double_scratch2 has already been overwritten 2859 // and may not have contained the exponent value in the first place when the 2860 // exponent is a smi. We reset it with exponent value before bailing out. 2861 __ j(not_equal, &done); 2862 __ cvtsi2sd(double_exponent, exponent); 2863 2864 // Returning or bailing out. 2865 Counters* counters = masm->isolate()->counters(); 2866 if (exponent_type_ == ON_STACK) { 2867 // The arguments are still on the stack. 2868 __ bind(&call_runtime); 2869 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); 2870 2871 // The stub is called from non-optimized code, which expects the result 2872 // as heap number in exponent. 2873 __ bind(&done); 2874 __ AllocateHeapNumber(eax, scratch, base, &call_runtime); 2875 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result); 2876 __ IncrementCounter(counters->math_pow(), 1); 2877 __ ret(2 * kPointerSize); 2878 } else { 2879 __ bind(&call_runtime); 2880 { 2881 AllowExternalCallThatCantCauseGC scope(masm); 2882 __ PrepareCallCFunction(4, scratch); 2883 __ movdbl(Operand(esp, 0 * kDoubleSize), double_base); 2884 __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent); 2885 __ CallCFunction( 2886 ExternalReference::power_double_double_function(masm->isolate()), 4); 2887 } 2888 // Return value is in st(0) on ia32. 2889 // Store it into the (fixed) result register. 
2890 __ sub(esp, Immediate(kDoubleSize)); 2891 __ fstp_d(Operand(esp, 0)); 2892 __ movdbl(double_result, Operand(esp, 0)); 2893 __ add(esp, Immediate(kDoubleSize)); 2894 2895 __ bind(&done); 2896 __ IncrementCounter(counters->math_pow(), 1); 2897 __ ret(0); 2898 } 2899 } 2900 2901 2902 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { 2903 // ----------- S t a t e ------------- 2904 // -- ecx : name 2905 // -- edx : receiver 2906 // -- esp[0] : return address 2907 // ----------------------------------- 2908 Label miss; 2909 2910 if (kind() == Code::KEYED_LOAD_IC) { 2911 __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string())); 2912 __ j(not_equal, &miss); 2913 } 2914 2915 StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss); 2916 __ bind(&miss); 2917 StubCompiler::TailCallBuiltin( 2918 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); 2919 } 2920 2921 2922 void StringLengthStub::Generate(MacroAssembler* masm) { 2923 // ----------- S t a t e ------------- 2924 // -- ecx : name 2925 // -- edx : receiver 2926 // -- esp[0] : return address 2927 // ----------------------------------- 2928 Label miss; 2929 2930 if (kind() == Code::KEYED_LOAD_IC) { 2931 __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string())); 2932 __ j(not_equal, &miss); 2933 } 2934 2935 StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss, 2936 support_wrapper_); 2937 __ bind(&miss); 2938 StubCompiler::TailCallBuiltin( 2939 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); 2940 } 2941 2942 2943 void StoreArrayLengthStub::Generate(MacroAssembler* masm) { 2944 // ----------- S t a t e ------------- 2945 // -- eax : value 2946 // -- ecx : name 2947 // -- edx : receiver 2948 // -- esp[0] : return address 2949 // ----------------------------------- 2950 // 2951 // This accepts as a receiver anything JSArray::SetElementsLength accepts 2952 // (currently anything except for external arrays which means anything with 2953 // elements of FixedArray type). Value must be a number, but only smis are 2954 // accepted as the most common case. 2955 2956 Label miss; 2957 2958 Register receiver = edx; 2959 Register value = eax; 2960 Register scratch = ebx; 2961 2962 if (kind() == Code::KEYED_STORE_IC) { 2963 __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string())); 2964 __ j(not_equal, &miss); 2965 } 2966 2967 // Check that the receiver isn't a smi. 2968 __ JumpIfSmi(receiver, &miss); 2969 2970 // Check that the object is a JS array. 2971 __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); 2972 __ j(not_equal, &miss); 2973 2974 // Check that elements are FixedArray. 2975 // We rely on StoreIC_ArrayLength below to deal with all types of 2976 // fast elements (including COW). 2977 __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset)); 2978 __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch); 2979 __ j(not_equal, &miss); 2980 2981 // Check that the array has fast properties, otherwise the length 2982 // property might have been redefined. 2983 __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); 2984 __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), 2985 Heap::kHashTableMapRootIndex); 2986 __ j(equal, &miss); 2987 2988 // Check that value is a smi. 2989 __ JumpIfNotSmi(value, &miss); 2990 2991 // Prepare tail call to StoreIC_ArrayLength. 
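// (The return address is popped first so that the two arguments can be
//  pushed beneath it, giving the callee a conventional-looking frame.)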
2992 __ pop(scratch); 2993 __ push(receiver); 2994 __ push(value); 2995 __ push(scratch); // return address 2996 2997 ExternalReference ref = 2998 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); 2999 __ TailCallExternalReference(ref, 2, 1); 3000 3001 __ bind(&miss); 3002 3003 StubCompiler::TailCallBuiltin( 3004 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); 3005 } 3006 3007 3008 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { 3009 // The key is in edx and the parameter count is in eax. 3010 3011 // The displacement is used for skipping the frame pointer on the 3012 // stack. It is the offset of the last parameter (if any) relative 3013 // to the frame pointer. 3014 static const int kDisplacement = 1 * kPointerSize; 3015 3016 // Check that the key is a smi. 3017 Label slow; 3018 __ JumpIfNotSmi(edx, &slow, Label::kNear); 3019 3020 // Check if the calling frame is an arguments adaptor frame. 3021 Label adaptor; 3022 __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 3023 __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); 3024 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3025 __ j(equal, &adaptor, Label::kNear); 3026 3027 // Check index against formal parameters count limit passed in 3028 // through register eax. Use unsigned comparison to get negative 3029 // check for free. 3030 __ cmp(edx, eax); 3031 __ j(above_equal, &slow, Label::kNear); 3032 3033 // Read the argument from the stack and return it. 3034 STATIC_ASSERT(kSmiTagSize == 1); 3035 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. 3036 __ lea(ebx, Operand(ebp, eax, times_2, 0)); 3037 __ neg(edx); 3038 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); 3039 __ ret(0); 3040 3041 // Arguments adaptor case: Check index against actual arguments 3042 // limit found in the arguments adaptor frame. Use unsigned 3043 // comparison to get negative check for free. 3044 __ bind(&adaptor); 3045 __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3046 __ cmp(edx, ecx); 3047 __ j(above_equal, &slow, Label::kNear); 3048 3049 // Read the argument from the stack and return it. 3050 STATIC_ASSERT(kSmiTagSize == 1); 3051 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. 3052 __ lea(ebx, Operand(ebx, ecx, times_2, 0)); 3053 __ neg(edx); 3054 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); 3055 __ ret(0); 3056 3057 // Slow-case: Handle non-smi or out-of-bounds access to arguments 3058 // by calling the runtime system. 3059 __ bind(&slow); 3060 __ pop(ebx); // Return address. 3061 __ push(edx); 3062 __ push(ebx); 3063 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); 3064 } 3065 3066 3067 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { 3068 // esp[0] : return address 3069 // esp[4] : number of parameters 3070 // esp[8] : receiver displacement 3071 // esp[12] : function 3072 3073 // Check if the calling frame is an arguments adaptor frame. 3074 Label runtime; 3075 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 3076 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); 3077 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3078 __ j(not_equal, &runtime, Label::kNear); 3079 3080 // Patch the arguments.length and the parameters pointer. 
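// (ecx holds the actual argument count as a smi; scaling it by times_2
//  rather than times_4 therefore already compensates for the smi tag.)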
3081 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3082 __ mov(Operand(esp, 1 * kPointerSize), ecx); 3083 __ lea(edx, Operand(edx, ecx, times_2, 3084 StandardFrameConstants::kCallerSPOffset)); 3085 __ mov(Operand(esp, 2 * kPointerSize), edx); 3086 3087 __ bind(&runtime); 3088 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 3089 } 3090 3091 3092 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { 3093 Isolate* isolate = masm->isolate(); 3094 3095 // esp[0] : return address 3096 // esp[4] : number of parameters (tagged) 3097 // esp[8] : receiver displacement 3098 // esp[12] : function 3099 3100 // ebx = parameter count (tagged) 3101 __ mov(ebx, Operand(esp, 1 * kPointerSize)); 3102 3103 // Check if the calling frame is an arguments adaptor frame. 3104 // TODO(rossberg): Factor out some of the bits that are shared with the other 3105 // Generate* functions. 3106 Label runtime; 3107 Label adaptor_frame, try_allocate; 3108 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 3109 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); 3110 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3111 __ j(equal, &adaptor_frame, Label::kNear); 3112 3113 // No adaptor, parameter count = argument count. 3114 __ mov(ecx, ebx); 3115 __ jmp(&try_allocate, Label::kNear); 3116 3117 // We have an adaptor frame. Patch the parameters pointer. 3118 __ bind(&adaptor_frame); 3119 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3120 __ lea(edx, Operand(edx, ecx, times_2, 3121 StandardFrameConstants::kCallerSPOffset)); 3122 __ mov(Operand(esp, 2 * kPointerSize), edx); 3123 3124 // ebx = parameter count (tagged) 3125 // ecx = argument count (tagged) 3126 // esp[4] = parameter count (tagged) 3127 // esp[8] = address of receiver argument 3128 // Compute the mapped parameter count = min(ebx, ecx) in ebx. 3129 __ cmp(ebx, ecx); 3130 __ j(less_equal, &try_allocate, Label::kNear); 3131 __ mov(ebx, ecx); 3132 3133 __ bind(&try_allocate); 3134 3135 // Save mapped parameter count. 3136 __ push(ebx); 3137 3138 // Compute the sizes of backing store, parameter map, and arguments object. 3139 // 1. Parameter map, has 2 extra words containing context and backing store. 3140 const int kParameterMapHeaderSize = 3141 FixedArray::kHeaderSize + 2 * kPointerSize; 3142 Label no_parameter_map; 3143 __ test(ebx, ebx); 3144 __ j(zero, &no_parameter_map, Label::kNear); 3145 __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize)); 3146 __ bind(&no_parameter_map); 3147 3148 // 2. Backing store. 3149 __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); 3150 3151 // 3. Arguments object. 3152 __ add(ebx, Immediate(Heap::kArgumentsObjectSize)); 3153 3154 // Do the allocation of all three objects in one go. 3155 __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT); 3156 3157 // eax = address of new object(s) (tagged) 3158 // ecx = argument count (tagged) 3159 // esp[0] = mapped parameter count (tagged) 3160 // esp[8] = parameter count (tagged) 3161 // esp[12] = address of receiver argument 3162 // Get the arguments boilerplate from the current native context into edi. 
3163 Label has_mapped_parameters, copy;
3164 __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3165 __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
3166 __ mov(ebx, Operand(esp, 0 * kPointerSize));
3167 __ test(ebx, ebx);
3168 __ j(not_zero, &has_mapped_parameters, Label::kNear);
3169 __ mov(edi, Operand(edi,
3170 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
3171 __ jmp(&copy, Label::kNear);
3172
3173 __ bind(&has_mapped_parameters);
3174 __ mov(edi, Operand(edi,
3175 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
3176 __ bind(&copy);
3177
3178 // eax = address of new object (tagged)
3179 // ebx = mapped parameter count (tagged)
3180 // ecx = argument count (tagged)
3181 // edi = address of boilerplate object (tagged)
3182 // esp[0] = mapped parameter count (tagged)
3183 // esp[8] = parameter count (tagged)
3184 // esp[12] = address of receiver argument
3185 // Copy the JS object part.
3186 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3187 __ mov(edx, FieldOperand(edi, i));
3188 __ mov(FieldOperand(eax, i), edx);
3189 }
3190
3191 // Set up the callee in-object property.
3192 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
3193 __ mov(edx, Operand(esp, 4 * kPointerSize));
3194 __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3195 Heap::kArgumentsCalleeIndex * kPointerSize),
3196 edx);
3197
3198 // Use the length (smi tagged) and set that as an in-object property too.
3199 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3200 __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3201 Heap::kArgumentsLengthIndex * kPointerSize),
3202 ecx);
3203
3204 // Set up the elements pointer in the allocated arguments object.
3205 // If we allocated a parameter map, edi will point there, otherwise to the
3206 // backing store.
3207 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3208 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3209
3210 // eax = address of new object (tagged)
3211 // ebx = mapped parameter count (tagged)
3212 // ecx = argument count (tagged)
3213 // edi = address of parameter map or backing store (tagged)
3214 // esp[0] = mapped parameter count (tagged)
3215 // esp[8] = parameter count (tagged)
3216 // esp[12] = address of receiver argument
3217 // Free a register.
3218 __ push(eax);
3219
3220 // Initialize parameter map. If there are no mapped arguments, we're done.
3221 Label skip_parameter_map;
3222 __ test(ebx, ebx);
3223 __ j(zero, &skip_parameter_map);
3224
3225 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3226 Immediate(isolate->factory()->non_strict_arguments_elements_map()));
3227 __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
3228 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
3229 __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
3230 __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
3231 __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
3232
3233 // Copy the parameter slots and the holes in the arguments.
3234 // We need to fill in mapped_parameter_count slots. They index the context,
3235 // where parameters are stored in reverse order, at
3236 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3237 // The mapped parameters thus need to get indices
3238 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
3239 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3240 // We loop from right to left.
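// A sketch of the fill performed by the loop below (indices are smis in
// the real code):
//   for (int i = mapped_count; i > 0; i--) {
//     parameter_map[i - 1] = Smi(MIN_CONTEXT_SLOTS + parameter_count - i);
//     backing_store[i - 1] = the_hole;
//   }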
3241 Label parameters_loop, parameters_test;
3242 __ push(ecx);
3243 __ mov(eax, Operand(esp, 2 * kPointerSize));
3244 __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3245 __ add(ebx, Operand(esp, 4 * kPointerSize));
3246 __ sub(ebx, eax);
3247 __ mov(ecx, isolate->factory()->the_hole_value());
3248 __ mov(edx, edi);
3249 __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
3250 // eax = loop variable (tagged)
3251 // ebx = mapping index (tagged)
3252 // ecx = the hole value
3253 // edx = address of parameter map (tagged)
3254 // edi = address of backing store (tagged)
3255 // esp[0] = argument count (tagged)
3256 // esp[4] = address of new object (tagged)
3257 // esp[8] = mapped parameter count (tagged)
3258 // esp[16] = parameter count (tagged)
3259 // esp[20] = address of receiver argument
3260 __ jmp(&parameters_test, Label::kNear);
3261
3262 __ bind(&parameters_loop);
3263 __ sub(eax, Immediate(Smi::FromInt(1)));
3264 __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
3265 __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
3266 __ add(ebx, Immediate(Smi::FromInt(1)));
3267 __ bind(&parameters_test);
3268 __ test(eax, eax);
3269 __ j(not_zero, &parameters_loop, Label::kNear);
3270 __ pop(ecx);
3271
3272 __ bind(&skip_parameter_map);
3273
3274 // ecx = argument count (tagged)
3275 // edi = address of backing store (tagged)
3276 // esp[0] = address of new object (tagged)
3277 // esp[4] = mapped parameter count (tagged)
3278 // esp[12] = parameter count (tagged)
3279 // esp[16] = address of receiver argument
3280 // Copy arguments header and remaining slots (if there are any).
3281 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3282 Immediate(isolate->factory()->fixed_array_map()));
3283 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3284
3285 Label arguments_loop, arguments_test;
3286 __ mov(ebx, Operand(esp, 1 * kPointerSize));
3287 __ mov(edx, Operand(esp, 4 * kPointerSize));
3288 __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
3289 __ sub(edx, ebx);
3290 __ jmp(&arguments_test, Label::kNear);
3291
3292 __ bind(&arguments_loop);
3293 __ sub(edx, Immediate(kPointerSize));
3294 __ mov(eax, Operand(edx, 0));
3295 __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
3296 __ add(ebx, Immediate(Smi::FromInt(1)));
3297
3298 __ bind(&arguments_test);
3299 __ cmp(ebx, ecx);
3300 __ j(less, &arguments_loop, Label::kNear);
3301
3302 // Restore.
3303 __ pop(eax); // Address of arguments object.
3304 __ pop(ebx); // Parameter count.
3305
3306 // Return and remove the on-stack parameters.
3307 __ ret(3 * kPointerSize);
3308
3309 // Do the runtime call to allocate the arguments object.
3310 __ bind(&runtime);
3311 __ pop(eax); // Remove saved parameter count.
3312 __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
3313 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3314 }
3315
3316
3317 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3318 Isolate* isolate = masm->isolate();
3319
3320 // esp[0] : return address
3321 // esp[4] : number of parameters
3322 // esp[8] : receiver displacement
3323 // esp[12] : function
3324
3325 // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor_frame, Label::kNear);

  // Get the length from the frame.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
  __ jmp(&try_allocate, Label::kNear);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  __ lea(edx, Operand(edx, ecx, times_2,
                      StandardFrameConstants::kCallerSPOffset));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ test(ecx, ecx);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));

  // Do the allocation of both objects in one go.
  __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);

  // Get the arguments boilerplate from the current native context.
  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
  const int offset =
      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ mov(edi, Operand(edi, offset));

  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ mov(ebx, FieldOperand(edi, i));
    __ mov(FieldOperand(eax, i), ebx);
  }

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                      Heap::kArgumentsLengthIndex * kPointerSize),
         ecx);

  // If there are no actual arguments, we're done.
  Label done;
  __ test(ecx, ecx);
  __ j(zero, &done, Label::kNear);

  // Get the parameters pointer from the stack.
  __ mov(edx, Operand(esp, 2 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(isolate->factory()->fixed_array_map()));

  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
  // Untag the length for the loop below.
  __ SmiUntag(ecx);

  // Copy the fixed array slots.
  Label loop;
  __ bind(&loop);
  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
  __ add(edi, Immediate(kPointerSize));
  __ sub(edx, Immediate(kPointerSize));
  __ dec(ecx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time, or if the regexp entry in generated code has been turned off by a
  // runtime switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: last_match_info (expected JSArray)
  //  esp[8]: previous index
  //  esp[12]: subject string
  //  esp[16]: JSRegExp object

  static const int kLastMatchInfoOffset = 1 * kPointerSize;
  static const int kPreviousIndexOffset = 2 * kPointerSize;
  static const int kSubjectOffset = 3 * kPointerSize;
  static const int kJSRegExpOffset = 4 * kPointerSize;

  Label runtime;
  Factory* factory = masm->isolate()->factory();

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(
          masm->isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
  __ test(ebx, ebx);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ mov(eax, Operand(esp, kJSRegExpOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
  __ j(not_equal, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ test(ecx, Immediate(kSmiTagMask));
    __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
    __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // ecx: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ j(not_equal, &runtime);

  // ecx: RegExp data (FixedArray)
  // Check that the number of captures fits in the static offsets vector
  // buffer.
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or          number_of_captures * 2 <= offsets vector size - 2
  // Multiplying by 2 comes for free since edx is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
  __ j(above, &runtime);

  // Reset offset for possibly sliced string.
  __ Set(edi, Immediate(0));
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ JumpIfSmi(eax, &runtime);
  __ mov(edx, eax);  // Make a copy of the original subject string.
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));

  // eax: subject string
  // edx: subject string
  // ebx: subject string instance type
  // ecx: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential two byte? If yes, go to (9).
  // (2) Sequential one byte? If yes, go to (6).
  // (3) Anything but sequential or cons? If yes, go to (7).
  // (4) Cons string. If the string is flat, replace subject with first string.
  //     Otherwise bailout.
  // (5a) Is subject sequential two byte? If yes, go to (9).
  // (5b) Is subject external? If yes, go to (8).
  // (6) One byte sequential. Load regexp code for one byte.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (7) Not a long external string? If yes, go to (10).
  // (8) External string. Make it, offset-wise, look like a sequential string.
  // (8a) Is the external string one byte? If yes, go to (6).
  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
  // (10) Short external string or not a string? If yes, bail out to runtime.
  // (11) Sliced string. Replace subject with parent. Go to (5a).

  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
        external_string /* 8 */, check_underlying /* 5a */,
        not_seq_nor_cons /* 7 */, check_code /* E */,
        not_long_external /* 10 */;

  // (1) Sequential two byte? If yes, go to (9).
  __ and_(ebx, kIsNotStringMask |
               kStringRepresentationMask |
               kStringEncodingMask |
               kShortExternalStringMask);
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).

  // (2) Sequential one byte? If yes, go to (6).
  // Any other sequential string must be one byte.
  __ and_(ebx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).

  // (3) Anything but sequential or cons? If yes, go to (7).
  // We check whether the subject string is a cons, since sequential strings
  // have already been covered.
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(ebx, Immediate(kExternalStringTag));
  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).

  // (4) Cons string. Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
  __ j(not_equal, &runtime);
  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
  __ bind(&check_underlying);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));

  // (5a) Is subject sequential two byte? If yes, go to (9).
  __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).
  // (5b) Is subject external? If yes, go to (8).
  __ test_b(ebx, kStringRepresentationMask);
  // The underlying external string is never a short external string.
  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ j(not_zero, &external_string);  // Go to (8).

  // eax: sequential subject string (or look-alike, external string)
  // edx: original subject string
  // ecx: RegExp data (FixedArray)
  // (6) One byte sequential. Load regexp code for one byte.
  __ bind(&seq_one_byte_string);
  // Load previous index and check range before edx is overwritten. We have
  // to use edx instead of eax here because it might have been only made to
  // look like a sequential string when it actually is an external string.
  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
  __ JumpIfNotSmi(ebx, &runtime);
  __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
  __ j(above_equal, &runtime);
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
  __ Set(ecx, Immediate(1));  // Type is one byte.

  // (E) Carry on. String handling is done.
  __ bind(&check_code);
  // edx: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(edx, &runtime);

  // eax: subject string
  // ebx: previous index (smi)
  // edx: code
  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte)
  // All checks done. Now push arguments for native regexp code.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 9;
  __ EnterApiExitFrame(kRegExpExecuteArguments);

  // Argument 9: Pass current isolate address.
  __ mov(Operand(esp, 8 * kPointerSize),
         Immediate(ExternalReference::isolate_address(masm->isolate())));

  // Argument 8: Indicate that this is a direct call from JavaScript.
  __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));

  // Argument 7: Start (high end) of backtracking stack memory area.
  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
  __ mov(Operand(esp, 6 * kPointerSize), esi);

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
  __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));

  // Argument 5: static offsets vector buffer.
  __ mov(Operand(esp, 4 * kPointerSize),
         Immediate(ExternalReference::address_of_static_offsets_vector(
             masm->isolate())));

  // Argument 2: Previous index.
  __ SmiUntag(ebx);
  __ mov(Operand(esp, 1 * kPointerSize), ebx);

  // Argument 1: Original subject string.
  // The original subject is in the previous stack frame. Therefore we have to
  // use ebp, which points exactly to one pointer size below the previous esp.
  // (Because creating a new stack frame pushes the previous ebp onto the stack
  // and thereby moves up esp by one kPointerSize.)
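  // (Editorial illustration: with kSubjectOffset == 3 * kPointerSize, the word
  // that was at esp[12] before the frame was entered is addressed below as
  // ebp[16], since ebp now sits one pointer size below the previous esp.)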
  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
  __ mov(Operand(esp, 0 * kPointerSize), esi);

  // esi: original subject string
  // eax: underlying subject string
  // ebx: previous index
  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte)
  // edx: code
  // Argument 4: End of string data
  // Argument 3: Start of string data
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
  __ add(esi, edi);  // Calculate input end wrt offset.
  __ SmiUntag(edi);
  __ add(ebx, edi);  // Calculate input start wrt offset.

  // ebx: start index of the input string
  // esi: end index of the input string
  Label setup_two_byte, setup_rest;
  __ test(ecx, ecx);
  __ j(zero, &setup_two_byte, Label::kNear);
  __ SmiUntag(esi);
  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
  __ jmp(&setup_rest, Label::kNear);

  __ bind(&setup_two_byte);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);  // esi is a smi (already scaled by 2).
  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.

  __ bind(&setup_rest);

  // Locate the code entry and call it.
  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(edx);

  // Drop arguments and come back to JS mode.
  __ LeaveApiExitFrame();

  // Check the result.
  Label success;
  __ cmp(eax, 1);
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ j(equal, &success);
  Label failure;
  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
  __ j(equal, &failure);
  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
  // If the result is not an exception, it can only be retry. Handle that in
  // the runtime system.
  __ j(not_equal, &runtime);
  // The result must now be an exception. If there is no pending exception
  // already, a stack overflow (on the backtrack stack) was detected in RegExp
  // code, but the exception has not been created yet. Handle that in the
  // runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      masm->isolate());
  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
  __ mov(eax, Operand::StaticVariable(pending_exception));
  __ cmp(edx, eax);
  __ j(equal, &runtime);
  // For exception, throw the exception again.

  // Clear the pending exception variable.
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Special handling of termination exceptions which are uncatchable
  // by javascript code.
  __ cmp(eax, factory->termination_exception());
  Label throw_termination_exception;
  __ j(equal, &throw_termination_exception, Label::kNear);

  // Handle normal exception by following handler chain.
  __ Throw(eax);

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(eax);

  __ bind(&failure);
  // For failure to match, return null.
  __ mov(eax, factory->null_value());
  __ ret(4 * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ mov(eax, Operand(esp, kJSRegExpOffset));
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(edx, Immediate(2));  // edx was a smi.

  // edx: Number of capture registers
  // Load last_match_info which is still known to be a fast case JSArray.
  // Check that the fourth object is a JSArray object.
  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
  __ JumpIfSmi(eax, &runtime);
  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
  __ j(not_equal, &runtime);
  // Check that the JSArray is in fast case.
  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
  __ cmp(eax, factory->fixed_array_map());
  __ j(not_equal, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information.
  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ SmiUntag(eax);
  __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmp(edx, eax);
  __ j(greater, &runtime);

  // ebx: last_match_info backing store (FixedArray)
  // edx: number of capture registers
  // Store the capture count.
  __ SmiTag(edx);  // Number of capture registers to smi.
  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
  __ SmiUntag(edx);  // Number of capture registers back from smi.
  // Store last subject and last input.
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ mov(ecx, eax);
  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
  __ RecordWriteField(ebx,
                      RegExpImpl::kLastSubjectOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs);
  __ mov(eax, ecx);
  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
  __ RecordWriteField(ebx,
                      RegExpImpl::kLastInputOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(masm->isolate());
  __ mov(ecx, Immediate(address_of_static_offsets_vector));

  // ebx: last_match_info backing store (FixedArray)
  // ecx: offsets vector
  // edx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ bind(&next_capture);
  __ sub(edx, Immediate(1));
  __ j(negative, &done, Label::kNear);
  // Read the value from the static offsets vector buffer.
  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
  __ SmiTag(edi);
  // Store the smi value in the last match info.
  __ mov(FieldOperand(ebx,
                      edx,
                      times_pointer_size,
                      RegExpImpl::kFirstCaptureOffset),
         edi);
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
  __ ret(4 * kPointerSize);

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);

  // Deferred code for string handling.
  // (7) Not a long external string? If yes, go to (10).
  __ bind(&not_seq_nor_cons);
  // Compare flags are still set from (3).
  __ j(greater, &not_long_external, Label::kNear);  // Go to (10).

  // (8) External string. Short external strings have been ruled out.
  __ bind(&external_string);
  // Reload instance type.
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test_b(ebx, kIsIndirectStringMask);
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kTwoByteStringTag == 0);
  // (8a) Is the external string one byte? If yes, go to (6).
  __ test_b(ebx, kStringEncodingMask);
  __ j(not_zero, &seq_one_byte_string);  // Go to (6).

  // eax: sequential subject string (or look-alike, external string)
  // edx: original subject string
  // ecx: RegExp data (FixedArray)
  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
  __ bind(&seq_two_byte_string);
  // Load previous index and check range before edx is overwritten. We have
  // to use edx instead of eax here because it might have been only made to
  // look like a sequential string when it actually is an external string.
  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
  __ JumpIfNotSmi(ebx, &runtime);
  __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
  __ j(above_equal, &runtime);
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
  __ Set(ecx, Immediate(0));  // Type is two byte.
  __ jmp(&check_code);  // Go to (E).

  // (10) Not a string or a short external string? If yes, bail out to runtime.
  __ bind(&not_long_external);
  // Catch non-string subject or short external string.
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
  __ j(not_zero, &runtime);

  // (11) Sliced string. Replace subject with parent. Go to (5a).
  // Load offset into edi and replace subject string with parent.
  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
  __ jmp(&check_underlying);  // Go to (5a).
#endif  // V8_INTERPRETED_REGEXP
}


void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  __ mov(ebx, Operand(esp, kPointerSize * 3));
  __ JumpIfNotSmi(ebx, &slowcase);
  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
  __ j(above, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
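  // (Editorial example: on ia32 the smi 100 is stored as the machine word 200,
  // i.e. value << 1 with a zero tag bit, so the smi-tagged length in ebx can
  // feed the element-count scaling of the Allocate call below directly.)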
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // Allocate RegExpResult followed by FixedArray with size in ebx.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
              times_pointer_size,
              ebx,  // In: Number of elements as a smi
              REGISTER_VALUE_IS_SMI,
              eax,  // Out: Start of allocation (tagged).
              ecx,  // Out: End of allocation.
              edx,  // Scratch register
              &slowcase,
              TAG_OBJECT);
  // eax: Start of allocated area, object-tagged.

  // Set JSArray map to global.regexp_result_map().
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
  __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
  Factory* factory = masm->isolate()->factory();
  __ mov(ecx, Immediate(factory->empty_fixed_array()));
  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
  __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);

  // Set input, index and length fields from arguments.
  __ mov(ecx, Operand(esp, kPointerSize * 1));
  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
  __ mov(ecx, Operand(esp, kPointerSize * 2));
  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
  __ mov(ecx, Operand(esp, kPointerSize * 3));
  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);

  // Fill out the elements FixedArray.
  // eax: JSArray.
  // ebx: FixedArray.
  // ecx: Number of elements in array, as smi.

  // Set map.
  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
         Immediate(factory->fixed_array_map()));
  // Set length.
  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
  // Fill contents of fixed-array with undefined.
  __ SmiUntag(ecx);
  __ mov(edx, Immediate(factory->undefined_value()));
  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
  // Fill fixed array elements with undefined.
  // eax: JSArray.
  // ecx: Number of elements to fill.
  // ebx: Start of elements in FixedArray.
  // edx: undefined.
  Label loop;
  __ test(ecx, ecx);
  __ bind(&loop);
  __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
  __ sub(ecx, Immediate(1));
  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
  __ jmp(&loop);

  __ bind(&done);
  __ ret(3 * kPointerSize);

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
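  // (Editorial example: a number string cache of FixedArray length 128 holds
  // 64 (number, string) pairs, so the mask computed below is 128 / 2 - 1 = 63.)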
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  __ sub(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(object, &not_smi, Label::kNear);
  __ mov(scratch, object);
  __ SmiUntag(scratch);
  __ jmp(&smi_hash_calculated, Label::kNear);
  __ bind(&not_smi);
  __ cmp(FieldOperand(object, HeapObject::kMapOffset),
         masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, not_found);
  STATIC_ASSERT(8 == kDoubleSize);
  __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
  __ and_(scratch, mask);
  Register index = scratch;
  Register probe = mask;
  __ mov(probe,
         FieldOperand(number_string_cache,
                      index,
                      times_twice_pointer_size,
                      FixedArray::kHeaderSize));
  __ JumpIfSmi(probe, not_found);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope fscope(masm, SSE2);
    __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
    __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
    __ ucomisd(xmm0, xmm1);
  } else {
    __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
    __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
    __ FCmp();
  }
  __ j(parity_even, not_found);  // Bail out if NaN is involved.
  __ j(not_equal, not_found);  // The cache did not contain this value.
  __ jmp(&load_result_from_cache, Label::kNear);

  __ bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  __ and_(scratch, mask);
  // Check if the entry is the smi we are looking for.
  __ cmp(object,
         FieldOperand(number_string_cache,
                      index,
                      times_twice_pointer_size,
                      FixedArray::kHeaderSize));
  __ j(not_equal, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ mov(result,
         FieldOperand(number_string_cache,
                      index,
                      times_twice_pointer_size,
                      FixedArray::kHeaderSize + kPointerSize));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->number_to_string_native(), 1);
}


void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ mov(ebx, Operand(esp, kPointerSize));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
  __ ret(1 * kPointerSize);

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
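  // (Editorial note, inferred from the runtime function's name: the SkipCache
  // variant converts without re-probing the cache we just missed.)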
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}


static int NegativeComparisonResult(Condition cc) {
  ASSERT(cc != equal);
  ASSERT((cc == less) || (cc == less_equal)
      || (cc == greater) || (cc == greater_equal));
  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}


static void CheckInputType(MacroAssembler* masm,
                           Register input,
                           CompareIC::State expected,
                           Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           Immediate(masm->isolate()->factory()->heap_number_map()));
    __ j(not_equal, fail);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


static void BranchIfNotInternalizedString(MacroAssembler* masm,
                                          Label* label,
                                          Register object,
                                          Register scratch) {
  __ JumpIfSmi(object, label);
  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  __ j(not_zero, label);
}


void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Label check_unequal_objects;
  Condition cc = GetCondition();

  Label miss;
  CheckInputType(masm, edx, left_, &miss);
  CheckInputType(masm, eax, right_, &miss);

  // Compare two smis.
  Label non_smi, smi_done;
  __ mov(ecx, edx);
  __ or_(ecx, eax);
  __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
  __ sub(edx, eax);  // Return on the result of the subtraction.
  __ j(no_overflow, &smi_done, Label::kNear);
  __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
  __ bind(&smi_done);
  __ mov(eax, edx);
  __ ret(0);
  __ bind(&non_smi);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Identical objects can be compared fast, but there are some tricky cases
  // for NaN and undefined.
  Label generic_heap_number_comparison;
  {
    Label not_identical;
    __ cmp(eax, edx);
    __ j(not_equal, &not_identical);

    if (cc != equal) {
      // Check for undefined. undefined OP undefined is false even though
      // undefined == undefined.
      Label check_for_nan;
      __ cmp(edx, masm->isolate()->factory()->undefined_value());
      __ j(not_equal, &check_for_nan, Label::kNear);
      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
      __ ret(0);
      __ bind(&check_for_nan);
    }

    // Test for NaN. Compare heap numbers in a general way,
    // to handle NaNs correctly.
    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
           Immediate(masm->isolate()->factory()->heap_number_map()));
    __ j(equal, &generic_heap_number_comparison, Label::kNear);
    if (cc != equal) {
      // Call runtime on identical JSObjects. Otherwise return equal.
      __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
      __ j(above_equal, &not_identical);
    }
    __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
    __ ret(0);


    __ bind(&not_identical);
  }

  // Strict equality can quickly decide whether objects are equal.
  // Non-strict object equality is slower, so it is handled later in the stub.
  if (cc == equal && strict()) {
    Label slow;  // Fallthrough label.
    Label not_smis;
    // If we're doing a strict equality comparison, we don't have to do
    // type conversion, so we generate code to do fast comparison for objects
    // and oddballs. Non-smi numbers and strings still go through the usual
    // slow-case code.
    // If either is a Smi (we know that not both are), then they can only
    // be equal if the other is a HeapNumber. If so, use the slow case.
    STATIC_ASSERT(kSmiTag == 0);
    ASSERT_EQ(0, Smi::FromInt(0));
    __ mov(ecx, Immediate(kSmiTagMask));
    __ and_(ecx, eax);
    __ test(ecx, edx);
    __ j(not_zero, &not_smis, Label::kNear);
    // One operand is a smi.

    // Check whether the non-smi is a heap number.
    STATIC_ASSERT(kSmiTagMask == 1);
    // ecx still holds eax & kSmiTag, which is either zero or one.
    __ sub(ecx, Immediate(0x01));
    __ mov(ebx, edx);
    __ xor_(ebx, eax);
    __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
    __ xor_(ebx, eax);
    // If eax was smi, ebx is now edx, else eax.

    // Check if the non-smi operand is a heap number.
    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
           Immediate(masm->isolate()->factory()->heap_number_map()));
    // If heap number, handle it in the slow case.
    __ j(equal, &slow, Label::kNear);
    // Return non-equal (ebx is not zero).
    __ mov(eax, ebx);
    __ ret(0);

    __ bind(&not_smis);
    // If either operand is a JSObject or an oddball value, then they are not
    // equal since their pointers are different.
    // There is no test for undetectability in strict equality.

    // Get the type of the first operand.
    // If the first object is a JS object, we have done pointer comparison.
    Label first_non_object;
    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
    __ j(below, &first_non_object, Label::kNear);

    // Return non-zero (eax is not zero).
    Label return_not_equal;
    STATIC_ASSERT(kHeapObjectTag != 0);
    __ bind(&return_not_equal);
    __ ret(0);

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ CmpInstanceType(ecx, ODDBALL_TYPE);
    __ j(equal, &return_not_equal);

    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
    __ j(above_equal, &return_not_equal);

    // Check for oddballs: true, false, null, undefined.
    __ CmpInstanceType(ecx, ODDBALL_TYPE);
    __ j(equal, &return_not_equal);

    // Fall through to the general case.
    __ bind(&slow);
  }

  // Generate the number comparison code.
  Label non_number_comparison;
  Label unordered;
  __ bind(&generic_heap_number_comparison);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope use_sse2(masm, SSE2);
    CpuFeatureScope use_cmov(masm, CMOV);

    FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
    __ ucomisd(xmm0, xmm1);

    // Don't base result on EFLAGS when a NaN is involved.
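    // (Editorial note: ucomisd sets ZF, PF and CF all to 1 for an unordered
    // result, i.e. when either operand is NaN, so parity_even branches exactly
    // in the NaN case.)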
    __ j(parity_even, &unordered, Label::kNear);
    // Return a result of -1, 0, or 1, based on EFLAGS.
    __ mov(eax, 0);  // equal
    __ mov(ecx, Immediate(Smi::FromInt(1)));
    __ cmov(above, eax, ecx);
    __ mov(ecx, Immediate(Smi::FromInt(-1)));
    __ cmov(below, eax, ecx);
    __ ret(0);
  } else {
    FloatingPointHelper::CheckFloatOperands(
        masm, &non_number_comparison, ebx);
    FloatingPointHelper::LoadFloatOperand(masm, eax);
    FloatingPointHelper::LoadFloatOperand(masm, edx);
    __ FCmp();

    // Don't base result on EFLAGS when a NaN is involved.
    __ j(parity_even, &unordered, Label::kNear);

    Label below_label, above_label;
    // Return a result of -1, 0, or 1, based on EFLAGS.
    __ j(below, &below_label, Label::kNear);
    __ j(above, &above_label, Label::kNear);

    __ Set(eax, Immediate(0));
    __ ret(0);

    __ bind(&below_label);
    __ mov(eax, Immediate(Smi::FromInt(-1)));
    __ ret(0);

    __ bind(&above_label);
    __ mov(eax, Immediate(Smi::FromInt(1)));
    __ ret(0);
  }

  // If one of the numbers was NaN, then the result is always false.
  // The cc is never not-equal.
  __ bind(&unordered);
  ASSERT(cc != not_equal);
  if (cc == less || cc == less_equal) {
    __ mov(eax, Immediate(Smi::FromInt(1)));
  } else {
    __ mov(eax, Immediate(Smi::FromInt(-1)));
  }
  __ ret(0);

  // The number comparison code did not provide a valid result.
  __ bind(&non_number_comparison);

  // Fast negative check for internalized-to-internalized equality.
  Label check_for_strings;
  if (cc == equal) {
    BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
    BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);

    // We've already checked for object identity, so if both operands
    // are internalized they aren't equal. Register eax already holds a
    // non-zero value, which indicates not equal, so just return.
    __ ret(0);
  }

  __ bind(&check_for_strings);

  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
                                         &check_unequal_objects);

  // Inline comparison of ASCII strings.
  if (cc == equal) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     edx,
                                                     eax,
                                                     ecx,
                                                     ebx);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       edx,
                                                       eax,
                                                       ecx,
                                                       ebx,
                                                       edi);
  }
#ifdef DEBUG
  __ Abort(kUnexpectedFallThroughFromStringComparison);
#endif

  __ bind(&check_unequal_objects);
  if (cc == equal && !strict()) {
    // Non-strict equality. Objects are unequal if
    // they are both JSObjects and not undetectable,
    // and their pointers are different.
    Label not_both_objects;
    Label return_unequal;
    // At most one is a smi, so we can test for smi by adding the two.
    // A smi plus a heap object has the low bit set, a heap object plus
    // a heap object has the low bit clear.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagMask == 1);
    __ lea(ecx, Operand(eax, edx, times_1, 0));
    __ test(ecx, Immediate(kSmiTagMask));
    __ j(not_zero, &not_both_objects, Label::kNear);
    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
    __ j(below, &not_both_objects, Label::kNear);
    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
    __ j(below, &not_both_objects, Label::kNear);
    // We do not bail out after this point. Both are JSObjects, and
    // they are equal if and only if both are undetectable.
    // The and of the undetectable flags is 1 if and only if they are equal.
    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(zero, &return_unequal, Label::kNear);
    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(zero, &return_unequal, Label::kNear);
    // The objects are both undetectable, so they both compare as the value
    // undefined, and are equal.
    __ Set(eax, Immediate(EQUAL));
    __ bind(&return_unequal);
    // Return non-equal by returning the non-zero object pointer in eax,
    // or return equal if we fell through to here.
    __ ret(0);
    __ bind(&not_both_objects);
  }

  // Push arguments below the return address.
  __ pop(ecx);
  __ push(edx);
  __ push(eax);

  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript builtin;
  if (cc == equal) {
    builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    builtin = Builtins::COMPARE;
    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
  }

  // Restore return address on the stack.
  __ push(ecx);

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(builtin, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StackCheckStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}


void InterruptStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a global property cell. Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // ebx : cache cell for call target
  // edi : the function to call
  Isolate* isolate = masm->isolate();
  Label initialize, done, miss, megamorphic, not_array_function;

  // Load the cache state into ecx.
  __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  __ cmp(ecx, edi);
  __ j(equal, &done);
  __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
  __ j(equal, &done);

  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorphic
  // sentinel, then we have in the cell either some other function or an
  // AllocationSite. Do a map check on the object in ecx.
  Handle<Map> allocation_site_map(
      masm->isolate()->heap()->allocation_site_map(),
      masm->isolate());
  __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
  __ j(not_equal, &miss);

  // Load the global or builtins object from the current context.
  __ LoadGlobalContext(ecx);
  // Make sure the function is the Array() function.
  __ cmp(edi, Operand(ecx,
                      Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
  __ j(not_equal, &megamorphic);
  __ jmp(&done);

  __ bind(&miss);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
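  // (Editorial sketch of the cache states: uninitialized sentinel ->
  // monomorphic (a JSFunction, or an AllocationSite for the Array function)
  // -> megamorphic sentinel; only an uninitialized cell is patched to the
  // monomorphic state below.)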
  __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
  __ j(equal, &initialize);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ mov(FieldOperand(ebx, Cell::kValueOffset),
         Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
  __ jmp(&done, Label::kNear);

  // An uninitialized cache is patched with the function, or with an
  // AllocationSite (to track the ElementsKind) if the function is the Array
  // constructor.
  __ bind(&initialize);
  __ LoadGlobalContext(ecx);
  // Make sure the function is the Array() function.
  __ cmp(edi, Operand(ecx,
                      Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
  __ j(not_equal, &not_array_function);

  // The target function is the Array constructor; create an AllocationSite
  // if we don't already have it, and store it in the cell.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    __ SmiTag(eax);
    __ push(eax);
    __ push(edi);
    __ push(ebx);

    CreateAllocationSiteStub create_stub;
    __ CallStub(&create_stub);

    __ pop(ebx);
    __ pop(edi);
    __ pop(eax);
    __ SmiUntag(eax);
  }
  __ jmp(&done);

  __ bind(&not_array_function);
  __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
  // No need for a write barrier here - cells are rescanned.

  __ bind(&done);
}


void CallFunctionStub::Generate(MacroAssembler* masm) {
  // ebx : cache cell for call target
  // edi : the function to call
  Isolate* isolate = masm->isolate();
  Label slow, non_function;

  // The receiver might implicitly be the global object. This is
  // indicated by passing the hole as the receiver to the call
  // function stub.
  if (ReceiverMightBeImplicit()) {
    Label receiver_ok;
    // Get the receiver from the stack.
    // +1 ~ return address
    __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
    // Call as function is indicated with the hole.
    __ cmp(eax, isolate->factory()->the_hole_value());
    __ j(not_equal, &receiver_ok, Label::kNear);
    // Patch the receiver on the stack with the global receiver object.
    __ mov(ecx, GlobalObjectOperand());
    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
    __ bind(&receiver_ok);
  }

  // Check that the function really is a JavaScript function.
  __ JumpIfSmi(edi, &non_function);
  // Go to the slow case if we do not have a function.
  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Fast-case: Just invoke the function.
  ParameterCount actual(argc_);

  if (ReceiverMightBeImplicit()) {
    Label call_as_function;
    __ cmp(eax, isolate->factory()->the_hole_value());
    __ j(equal, &call_as_function);
    __ InvokeFunction(edi,
                      actual,
                      JUMP_FUNCTION,
                      NullCallWrapper(),
                      CALL_AS_METHOD);
    __ bind(&call_as_function);
  }
  __ InvokeFunction(edi,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    CALL_AS_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  if (RecordCallTarget()) {
    // If there is a call target cache, mark it megamorphic in the
    // non-function case. MegamorphicSentinel is an immortal immovable
    // object (undefined) so no write barrier is needed.
    __ mov(FieldOperand(ebx, Cell::kValueOffset),
           Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
  }
  // Check for function proxy.
  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
  __ j(not_equal, &non_function);
  __ pop(ecx);
  __ push(edi);  // put proxy as additional argument under return address
  __ push(ecx);
  __ Set(eax, Immediate(argc_ + 1));
  __ Set(ebx, Immediate(0));
  __ SetCallKind(ecx, CALL_AS_FUNCTION);
  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
  {
    Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
    __ jmp(adaptor, RelocInfo::CODE_TARGET);
  }

  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
  // of the original receiver from the call site).
  __ bind(&non_function);
  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
  __ Set(eax, Immediate(argc_));
  __ Set(ebx, Immediate(0));
  __ SetCallKind(ecx, CALL_AS_METHOD);
  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
  __ jmp(adaptor, RelocInfo::CODE_TARGET);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  // eax : number of arguments
  // ebx : cache cell for call target
  // edi : constructor function
  Label slow, non_function_call;

  // Check that function is not a smi.
  __ JumpIfSmi(edi, &non_function_call);
  // Check that function is a JSFunction.
  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Jump to the function-specific construct stub.
  Register jmp_reg = ecx;
  __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(jmp_reg, FieldOperand(jmp_reg,
                               SharedFunctionInfo::kConstructStubOffset));
  __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
  __ jmp(jmp_reg);

  // edi: called object
  // eax: number of arguments
  // ecx: object map
  Label do_call;
  __ bind(&slow);
  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
  __ j(not_equal, &non_function_call);
  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing eax).
  __ Set(ebx, Immediate(0));
  Handle<Code> arguments_adaptor =
      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
  __ SetCallKind(ecx, CALL_AS_METHOD);
  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
}


bool CEntryStub::NeedsImmovableCode() {
  return false;
}


bool CEntryStub::IsPregenerated() {
  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
          result_size_ == 1;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  // It is important that the store buffer overflow stubs are generated first.
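  // (Editorial inference from the ordering requirement above: the record write
  // stubs generated next expect the store buffer overflow stubs to already be
  // present in the stub cache.)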
  RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  if (CpuFeatures::IsSupported(SSE2)) {
    CEntryStub save_doubles(1, kSaveFPRegs);
    // Stubs might already be in the snapshot; detect that and don't
    // regenerate them, which would mess up the code stub initialization state.
    Code* save_doubles_code;
    if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
      save_doubles_code = *(save_doubles.GetCode(isolate));
    }
    save_doubles_code->set_is_pregenerated(true);
    isolate->set_fp_stubs_generated(true);
  }
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(1, kDontSaveFPRegs);
  Handle<Code> code = stub.GetCode(isolate);
  code->set_is_pregenerated(true);
}


static void JumpIfOOM(MacroAssembler* masm,
                      Register value,
                      Register scratch,
                      Label* oom_label) {
  __ mov(scratch, value);
  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
  STATIC_ASSERT(kFailureTag == 3);
  __ and_(scratch, 0xf);
  __ cmp(scratch, 0xf);
  __ j(equal, oom_label);
}


void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate_scope) {
  // eax: result parameter for PerformGC, if any
  // ebx: pointer to C function (C callee-saved)
  // ebp: frame pointer (restored after C call)
  // esp: stack pointer (restored after C call)
  // edi: number of arguments including receiver (C callee-saved)
  // esi: pointer to the first argument (C callee-saved)

  // Result returned in eax, or eax+edx if result_size_ is 2.

  // Check stack alignment.
  if (FLAG_debug_code) {
    __ CheckStackAlignment();
  }

  if (do_gc) {
    // Pass failure code returned from last attempt as first argument to
    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
    // stack alignment is known to be correct. This function takes one argument
    // which is passed on the stack, and we know that the stack has been
    // prepared to pass at least one argument.
    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
  if (always_allocate_scope) {
    __ inc(Operand::StaticVariable(scope_depth));
  }

  // Call C function.
  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
  __ mov(Operand(esp, 2 * kPointerSize),
         Immediate(ExternalReference::isolate_address(masm->isolate())));
  __ call(ebx);
  // Result is in eax or edx:eax - do not destroy these registers!

  if (always_allocate_scope) {
    __ dec(Operand::StaticVariable(scope_depth));
  }

  // Runtime functions should not return 'the hole'. Allowing it to escape may
  // lead to crashes in the IC code later.
  if (FLAG_debug_code) {
    Label okay;
    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
    __ j(not_equal, &okay, Label::kNear);
    __ int3();
    __ bind(&okay);
  }

  // Check for failure result.
  Label failure_returned;
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  __ lea(ecx, Operand(eax, 1));
  // Lower 2 bits of ecx are 0 iff eax has failure tag.
  __ test(ecx, Immediate(kFailureTagMask));
  __ j(zero, &failure_returned);

  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, masm->isolate());

  // Check that there is no pending exception, otherwise we
  // should have returned some failure value.
  if (FLAG_debug_code) {
    __ push(edx);
    __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
    Label okay;
    __ cmp(edx, Operand::StaticVariable(pending_exception_address));
    // Cannot use Check here, as it attempts to generate a call into the
    // runtime.
    __ j(equal, &okay, Label::kNear);
    __ int3();
    __ bind(&okay);
    __ pop(edx);
  }

  // Exit the JavaScript to C++ exit frame.
  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
  __ ret(0);

  // Handling of failure.
  __ bind(&failure_returned);

  Label retry;
  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ j(zero, &retry, Label::kNear);

  // Special handling of out of memory exceptions.
  JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);

  // Retrieve the pending exception.
  __ mov(eax, Operand::StaticVariable(pending_exception_address));

  // See if we just retrieved an OOM exception.
  JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);

  // Clear the pending exception.
  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
  __ mov(Operand::StaticVariable(pending_exception_address), edx);

  // Special handling of termination exceptions, which are uncatchable
  // by javascript code.
  __ cmp(eax, masm->isolate()->factory()->termination_exception());
  __ j(equal, throw_termination_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  // Retry.
  __ bind(&retry);
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // eax: number of arguments including receiver
  // ebx: pointer to C function (C callee-saved)
  // ebp: frame pointer (restored after C call)
  // esp: stack pointer (restored after C call)
  // esi: current context (C callee-saved)
  // edi: JS function of the caller (C callee-saved)

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // NOTE: Invocations of builtins may return failure objects instead
  // of a proper result. The builtin entry handles this by performing
  // a garbage collection and retrying the builtin (twice).

  // Enter the exit frame that transitions from JavaScript to C++.
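  // (Editorial sketch: EnterExitFrame saves ebp and a code slot on the stack
  // and publishes the frame pointer through Isolate::kCEntryFPAddress, so the
  // stack walker can cross from C++ frames back into JS frames.)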
  // Enter the exit frame that transitions from JavaScript to C++.
  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);

  // eax: result parameter for PerformGC, if any (setup below)
  // ebx: pointer to builtin function (C callee-saved)
  // ebp: frame pointer (restored after C call)
  // esp: stack pointer (restored after C call)
  // edi: number of arguments including receiver (C callee-saved)
  // esi: argv pointer (C callee-saved)

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  // Set external caught exception to false.
  Isolate* isolate = masm->isolate();
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                    isolate);
  __ mov(Operand::StaticVariable(external_caught), Immediate(false));

  // Set pending exception and eax to the out-of-memory exception.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      isolate);
  Label already_have_failure;
  JumpIfOOM(masm, eax, ecx, &already_have_failure);
  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
  __ bind(&already_have_failure);
  __ mov(Operand::StaticVariable(pending_exception), eax);
  // Fall through to the next label.

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(eax);

  __ bind(&throw_normal_exception);
  __ Throw(eax);
}


void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  Label invoke, handler_entry, exit;
  Label not_outermost_js, not_outermost_js_2;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Set up the frame.
  __ push(ebp);
  __ mov(ebp, esp);

  // Push the marker in two places.
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  __ push(Immediate(Smi::FromInt(marker)));  // context slot
  __ push(Immediate(Smi::FromInt(marker)));  // function slot
  // Save callee-saved registers (C calling conventions).
  __ push(edi);
  __ push(esi);
  __ push(ebx);

  // Save a copy of the top frame descriptor on the stack.
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
  __ push(Operand::StaticVariable(c_entry_fp));
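  // For reference (an assumption-level sketch of the frame built above;
  // offsets relative to ebp after the pushes):
  //
  //   ebp + 4 : return address
  //   ebp + 0 : saved ebp
  //   ebp - 4 : frame marker (context slot)
  //   ebp - 8 : frame marker (function slot)
  //   ebp - 12: saved edi
  //   ebp - 16: saved esi
  //   ebp - 20: saved ebx
  //   ebp - 24: saved c_entry_fp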
  // If this is the outermost JS call, set js_entry_sp value.
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
                                masm->isolate());
  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ j(not_equal, &not_outermost_js, Label::kNear);
  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ jmp(&invoke, Label::kNear);
  __ bind(&not_outermost_js);
  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      masm->isolate());
  __ mov(Operand::StaticVariable(pending_exception), eax);
  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain. There's only one
  // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);

  // Clear any pending exceptions.
  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Fake a receiver (NULL).
  __ push(Immediate(0));  // receiver

  // Invoke the function by calling through the JS entry trampoline builtin
  // and pop the faked function when we return. Note that we cannot store a
  // reference to the trampoline code directly in this stub, because the
  // builtin stubs may not have been generated yet.
  if (is_construct) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      masm->isolate());
    __ mov(edx, Immediate(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline,
                            masm->isolate());
    __ mov(edx, Immediate(entry));
  }
  __ mov(edx, Operand(edx, 0));  // deref address
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ call(edx);

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);
  // Check if the current stack frame is marked as the outermost JS frame.
  __ pop(ebx);
  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ j(not_equal, &not_outermost_js_2);
  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ bind(&not_outermost_js_2);

  // Restore the top frame descriptor from the stack.
  __ pop(Operand::StaticVariable(ExternalReference(
      Isolate::kCEntryFPAddress,
      masm->isolate())));

  // Restore callee-saved registers (C calling conventions).
  __ pop(ebx);
  __ pop(esi);
  __ pop(edi);
  __ add(esp, Immediate(2 * kPointerSize));  // remove markers

  // Restore the frame pointer and return.
  __ pop(ebp);
  __ ret(0);
}

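
// For illustration (assumption-level; the real invocation goes through
// Execution::Call and the JSEntryTrampoline builtin): how generated JSEntry
// code is typically entered from C++.
//
//   typedef Object* (*JSEntryFunction)(byte* entry,
//                                      Object* function,
//                                      Object* receiver,
//                                      int argc,
//                                      Object*** args);
//   JSEntryFunction stub_entry =
//       FUNCTION_CAST<JSEntryFunction>(code->entry());
//   Object* result = stub_entry(trampoline_entry, function, receiver,
//                               argc, args);
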
// Generate stub code for instanceof.
// This code can patch a call site inlined cache of the instanceof check,
// which looks like this.
//
//   81 ff XX XX XX XX   cmp    edi, <the hole, patched to a map>
//   75 0a               jne    <some near label>
//   b8 XX XX XX XX      mov    eax, <the hole, patched to either true or false>
//
// If call site patching is requested the stack will have the delta from the
// return address to the cmp instruction just below the return address. This
// also means that call site patching can only take place with arguments in
// registers. TOS looks like this when call site patching is requested
//
//   esp[0] : return address
//   esp[4] : delta from return address to cmp instruction
//
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub.
  Register object = eax;  // Object (lhs).
  Register map = ebx;  // Map of the object.
  Register function = edx;  // Function (rhs).
  Register prototype = edi;  // Prototype of the function.
  Register scratch = ecx;

  // Constants describing the call site code to patch.
  static const int kDeltaToCmpImmediate = 2;
  static const int kDeltaToMov = 8;
  static const int kDeltaToMovImmediate = 9;
  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);

  ASSERT_EQ(object.code(), InstanceofStub::left().code());
  ASSERT_EQ(function.code(), InstanceofStub::right().code());

  // Get the object and function - they are always both needed.
  Label slow, not_js_object;
  if (!HasArgsInRegisters()) {
    __ mov(object, Operand(esp, 2 * kPointerSize));
    __ mov(function, Operand(esp, 1 * kPointerSize));
  }

  // Check that the left-hand side is a JS object.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache, don't look in the global cache, but do the
  // real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck()) {
    // Look up the function and the map in the instanceof cache.
    Label miss;
    __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
    __ j(not_equal, &miss, Label::kNear);
    __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
    __ j(not_equal, &miss, Label::kNear);
    __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
    __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
    __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
  } else {
    // The constants for the code patching are based on there being no push
    // instructions at the call site.
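    // For illustration (an assumption-level byte layout matching the delta
    // and opcode constants above; "scratch" below is made to point at the
    // cmp instruction):
    //
    //   +0: 3b 3d         cmp edi, [disp32]     (kCmpEdiOperandByte1/2)
    //   +2: XX XX XX XX   disp32 -> map cell    (kDeltaToCmpImmediate)
    //   +6: 75 0a         jne <near label>
    //   +8: b8            mov eax, imm32        (kDeltaToMov)
    //   +9: XX XX XX XX   imm32 -> true/false   (kDeltaToMovImmediate)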
    ASSERT(HasArgsInRegisters());
    // Get the return address and the delta to the inlined map check.
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
      __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
    }
    __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
    __ mov(Operand(scratch, 0), map);
  }

  // Loop through the prototype chain of the object looking for the function
  // prototype.
  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
  Label loop, is_instance, is_not_instance;
  __ bind(&loop);
  __ cmp(scratch, prototype);
  __ j(equal, &is_instance, Label::kNear);
  Factory* factory = masm->isolate()->factory();
  __ cmp(scratch, Immediate(factory->null_value()));
  __ j(equal, &is_not_instance, Label::kNear);
  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
  __ jmp(&loop);

  __ bind(&is_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(eax, Immediate(0));
    __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Get the return address and the delta to the inlined map check.
    __ mov(eax, factory->true_value());
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
    }
    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
    if (!ReturnTrueFalseObject()) {
      __ Set(eax, Immediate(0));
    }
  }
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(eax, Immediate(Smi::FromInt(1)));
    __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Get the return address and the delta to the inlined map check.
    __ mov(eax, factory->false_value());
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
    }
    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
    if (!ReturnTrueFalseObject()) {
      __ Set(eax, Immediate(Smi::FromInt(1)));
    }
  }
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before the null, smi and string value checks, check that the rhs is a
  // function, as for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow, Label::kNear);
  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
  __ j(not_equal, &slow, Label::kNear);

  // Null is not an instance of anything.
  __ cmp(object, factory->null_value());
  __ j(not_equal, &object_not_null, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
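  // For reference (assumption-level): unless ReturnTrueFalseObject() is set,
  // this stub uses the same answer encoding as the INSTANCE_OF builtin that
  // the slow case below tail-calls:
  //
  //   Smi 0  -> is an instance
  //   Smi 1  -> is not an instance
  //
  // which is why the negative cases here return Smi::FromInt(1).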
  __ bind(&object_not_null);
  // Smi values are not instances of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&object_not_null_or_smi);
  // String values are not instances of anything.
  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
  __ j(NegateCondition(is_string), &slow, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  // Slow-case: Go through the JavaScript implementation.
  __ bind(&slow);
  if (!ReturnTrueFalseObject()) {
    // Tail call the builtin which returns 0 or 1.
    if (HasArgsInRegisters()) {
      // Push arguments below the return address.
      __ pop(scratch);
      __ push(object);
      __ push(function);
      __ push(scratch);
    }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
  } else {
    // Call the builtin and convert 0/1 to true/false.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(object);
      __ push(function);
      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
    }
    Label true_value, done;
    __ test(eax, eax);
    __ j(zero, &true_value, Label::kNear);
    __ mov(eax, factory->false_value());
    __ jmp(&done, Label::kNear);
    __ bind(&true_value);
    __ mov(eax, factory->true_value());
    __ bind(&done);
    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
  }
}


Register InstanceofStub::left() { return eax; }


Register InstanceofStub::right() { return edx; }


// -------------------------------------------------------------------------
// StringCharCodeAtGenerator

void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  // If the receiver is a smi trigger the non-string case.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(object_, receiver_not_string_);

  // Fetch the instance type of the receiver into the result register.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string trigger the non-string case.
  __ test(result_, Immediate(kIsNotStringMask));
  __ j(not_zero, receiver_not_string_);

  // If the index is non-smi trigger the non-smi case.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(index_, &index_not_smi_);
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
  __ j(above_equal, index_out_of_range_);

  __ SmiUntag(index_);

  Factory* factory = masm->isolate()->factory();
  StringCharLoadGenerator::Generate(
      masm, factory, object_, index_, result_, &call_runtime_);

  __ SmiTag(result_);
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              masm->isolate()->factory()->heap_number_map(),
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);  // Consumed by runtime conversion function.
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
  if (!index_.is(eax)) {
    // Save the conversion result before the pop instructions below
    // have a chance to overwrite it.
    __ mov(index_, eax);
  }
  __ pop(object_);
  // Reload the instance type.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call the runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ SmiTag(index_);
  __ push(index_);
  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
  __ test(code_,
          Immediate(kSmiTagMask |
                    ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
  __ j(not_zero, &slow_case_);

  Factory* factory = masm->isolate()->factory();
  __ Set(result_, Immediate(factory->single_character_string_cache()));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiShiftSize == 0);
  // At this point the code register contains a smi-tagged ASCII char code.
  __ mov(result_, FieldOperand(result_,
                               code_, times_half_pointer_size,
                               FixedArray::kHeaderSize));
  __ cmp(result_, factory->undefined_value());
  __ j(equal, &slow_case_);
  __ bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label call_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  // Load the two arguments.
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
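  // For illustration (an assumption-level summary of the flag handling
  // below):
  //
  //   STRING_ADD_CHECK_BOTH (== CHECK_LEFT | CHECK_RIGHT):
  //       verify both arguments are strings, else call the runtime;
  //   STRING_ADD_CHECK_LEFT:
  //       convert only the left argument if needed, falling back to the
  //       Builtins::STRING_ADD_RIGHT builtin on failure;
  //   STRING_ADD_CHECK_RIGHT:
  //       symmetric, falling back to Builtins::STRING_ADD_LEFT.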
  // Make sure that both arguments are strings if not known in advance.
  // Otherwise, at least one of the arguments is definitely a string,
  // and we convert the one that is not known to be a string.
  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
    __ JumpIfSmi(eax, &call_runtime);
    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
    __ j(above_equal, &call_runtime);

    // First argument is a string; test the second.
    __ JumpIfSmi(edx, &call_runtime);
    __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
    __ j(above_equal, &call_runtime);
  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
    GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
                            &call_builtin);
    builtin_id = Builtins::STRING_ADD_RIGHT;
  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
    GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
                            &call_builtin);
    builtin_id = Builtins::STRING_ADD_LEFT;
  }

  // Both arguments are strings.
  // eax: first string
  // edx: second string
  // Check if either of the strings is empty. In that case return the other.
  Label second_not_zero_length, both_not_zero_length;
  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ test(ecx, ecx);
  __ j(not_zero, &second_not_zero_length, Label::kNear);
  // Second string is empty, result is first string which is already in eax.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
  __ bind(&second_not_zero_length);
  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ test(ebx, ebx);
  __ j(not_zero, &both_not_zero_length, Label::kNear);
  // First string is empty, result is second string which is in edx.
  __ mov(eax, edx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Both strings are non-empty.
  // eax: first string
  // ebx: length of first string as a smi
  // ecx: length of second string as a smi
  // edx: second string
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  __ bind(&both_not_zero_length);
  __ add(ebx, ecx);
  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
  // Handle exceptionally long strings in the runtime system.
  __ j(overflow, &call_runtime);
  // Use the string table when adding two one-character strings, as it
  // helps later optimizations to return an internalized string here.
  __ cmp(ebx, Immediate(Smi::FromInt(2)));
  __ j(not_equal, &longer_than_two);

  // Check that both strings are non-external ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);

  // Get the two characters forming the new string.
  __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
  __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
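  // A note on the overflow check above (reasoning only, no extra code):
  // lengths are smis, i.e. value << 1 on ia32, and Smi::kMaxValue ==
  // String::kMaxLength, so the tagged addition
  //
  //   result_length_smi = first_length_smi + second_length_smi
  //
  // overflows the signed 32-bit range exactly when the untagged sum exceeds
  // String::kMaxLength; a single j(overflow) therefore routes overlong
  // results to the runtime.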
  // Try to find the two-character string in the string table. If it is not
  // found just allocate a new one.
  Label make_two_character_string, make_two_character_string_no_reload;
  StringHelper::GenerateTwoCharacterStringTableProbe(
      masm, ebx, ecx, eax, edx, edi,
      &make_two_character_string_no_reload, &make_two_character_string);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Allocate a two character string.
  __ bind(&make_two_character_string);
  // Reload the arguments.
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
  // Get the two characters forming the new string.
  __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
  __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
  __ bind(&make_two_character_string_no_reload);
  __ IncrementCounter(counters->string_add_make_two_char(), 1);
  __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
  // Pack both characters in ebx.
  __ shl(ecx, kBitsPerByte);
  __ or_(ebx, ecx);
  // Set the characters in the new string.
  __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&longer_than_two);
  // Check if the resulting string will be flat.
  __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
  __ j(below, &string_add_flat_result);

  // If the result is not supposed to be flat, allocate a cons string object.
  // If both strings are ASCII the result is an ASCII cons string.
  Label non_ascii, allocated, ascii_data;
  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ and_(ecx, edi);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(ecx, Immediate(kStringEncodingMask));
  __ j(zero, &non_ascii);
  __ bind(&ascii_data);
  // Allocate an ASCII cons string.
  __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
  __ bind(&allocated);
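  // For reference (assumption-level; names follow the ConsString offsets
  // used below rather than the exact C++ class layout): a cons string
  // records its two halves instead of copying characters.
  //
  //   ConsString {
  //     length     <- kLengthOffset    (sum of both lengths, a smi)
  //     hash_field <- kHashFieldOffset (kEmptyHashField: computed lazily)
  //     first      <- kFirstOffset     (left half)
  //     second     <- kSecondOffset    (right half)
  //   }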
  // Fill the fields of the cons string.
  __ AssertSmi(ebx);
  __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
  __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
         Immediate(String::kEmptyHashField));

  Label skip_write_barrier, after_writing;
  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(masm->isolate());
  __ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
  __ j(zero, &skip_write_barrier);

  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
  __ RecordWriteField(ecx,
                      ConsString::kFirstOffset,
                      eax,
                      ebx,
                      kDontSaveFPRegs);
  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
  __ RecordWriteField(ecx,
                      ConsString::kSecondOffset,
                      edx,
                      ebx,
                      kDontSaveFPRegs);
  __ jmp(&after_writing);

  __ bind(&skip_write_barrier);
  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);

  __ bind(&after_writing);

  __ mov(eax, ecx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only one byte characters.
  // ecx: first instance type AND second instance type.
  // edi: second instance type.
  __ test(ecx, Immediate(kOneByteDataHintMask));
  __ j(not_zero, &ascii_data);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ xor_(edi, ecx);
  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
  __ and_(edi, kOneByteStringTag | kOneByteDataHintTag);
  __ cmp(edi, kOneByteStringTag | kOneByteDataHintTag);
  __ j(equal, &ascii_data);
  // Allocate a two byte cons string.
  __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
  __ jmp(&allocated);

  // We cannot encounter sliced strings or cons strings here since:
  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
  // Handle creating a flat result from either external or sequential strings.
  // Locate the first characters' locations.
  // eax: first string
  // ebx: length of resulting flat string as a smi
  // edx: second string
  Label first_prepared, second_prepared;
  Label first_is_sequential, second_is_sequential;
  __ bind(&string_add_flat_result);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  // ecx: instance type of first string
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test_b(ecx, kStringRepresentationMask);
  __ j(zero, &first_is_sequential, Label::kNear);
  // Rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(ecx, kShortExternalStringMask);
  __ j(not_zero, &call_runtime);
  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ jmp(&first_prepared, Label::kNear);
  __ bind(&first_is_sequential);
  __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ bind(&first_prepared);

  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
  // Check whether both strings have the same encoding.
  // edi: instance type of second string
  __ xor_(ecx, edi);
  __ test_b(ecx, kStringEncodingMask);
  __ j(not_zero, &call_runtime);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test_b(edi, kStringRepresentationMask);
  __ j(zero, &second_is_sequential, Label::kNear);
  // Rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(edi, kShortExternalStringMask);
  __ j(not_zero, &call_runtime);
  __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ jmp(&second_prepared, Label::kNear);
  __ bind(&second_is_sequential);
  __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ bind(&second_prepared);

  // Push the addresses of both strings' first characters onto the stack.
  __ push(edx);
  __ push(eax);

  Label non_ascii_string_add_flat_result, call_runtime_drop_two;
  // edi: instance type of second string
  // First string and second string have the same encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(edi, kStringEncodingMask);
  __ j(zero, &non_ascii_string_add_flat_result);

  // Both strings are ASCII strings.
  // ebx: length of resulting flat string as a smi
  __ SmiUntag(ebx);
  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
  // eax: result string
  __ mov(ecx, eax);
  // Locate first character of result.
  __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  // Load first argument's length and first character location. Account for
  // values currently on the stack when fetching arguments from it.
  __ mov(edx, Operand(esp, 4 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ pop(edx);
  // eax: result string
  // ecx: first character of result
  // edx: first char of first argument
  // edi: length of first argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
  // Load second argument's length and first character location. Account for
  // values currently on the stack when fetching arguments from it.
  __ mov(edx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ pop(edx);
  // eax: result string
  // ecx: next character of result
  // edx: first char of second argument
  // edi: length of second argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
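  // For illustration (assumption-level pseudocode of the flat-result path
  // above and its two-byte twin below):
  //
  //   result = AllocateSeqString(len1 + len2, encoding);
  //   CopyChars(result->chars(), first->chars(), len1);
  //   CopyChars(result->chars() + len1, second->chars(), len2);
  //   return result;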
  // Handle creating a flat two byte result.
  // eax: first string - known to be two byte
  // ebx: length of resulting flat string as a smi
  // edx: second string
  __ bind(&non_ascii_string_add_flat_result);
  // Both strings are two byte strings.
  __ SmiUntag(ebx);
  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
  // eax: result string
  __ mov(ecx, eax);
  // Locate first character of result.
  __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load first argument's length and first character location. Account for
  // values currently on the stack when fetching arguments from it.
  __ mov(edx, Operand(esp, 4 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ pop(edx);
  // eax: result string
  // ecx: first character of result
  // edx: first char of first argument
  // edi: length of first argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
  // Load second argument's length and first character location. Account for
  // values currently on the stack when fetching arguments from it.
  __ mov(edx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ pop(edx);
  // eax: result string
  // ecx: next character of result
  // edx: first char of second argument
  // edi: length of second argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Recover stack pointer before jumping to runtime.
  __ bind(&call_runtime_drop_two);
  __ Drop(2);
  // Just jump to runtime to add the two strings.
  __ bind(&call_runtime);
  if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
    GenerateRegisterArgsPop(masm, ecx);
    // Build a frame.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      GenerateRegisterArgsPush(masm);
      __ CallRuntime(Runtime::kStringAdd, 2);
    }
    __ ret(0);
  } else {
    __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
  }

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
      GenerateRegisterArgsPop(masm, ecx);
      // Build a frame.
      {
        FrameScope scope(masm, StackFrame::INTERNAL);
        GenerateRegisterArgsPush(masm);
        __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
      }
      __ ret(0);
    } else {
      __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
    }
  }
}


void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ push(eax);
  __ push(edx);
}


void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
                                            Register temp) {
  __ pop(temp);
  __ pop(edx);
  __ pop(eax);
  __ push(temp);
}


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
  __ j(below, &done);
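  // For illustration (an assumption-level summary of this helper's
  // strategy):
  //
  //   if (IsString(arg)) return arg;                    // checked above
  //   if (InNumberStringCache(arg)) return cached;      // checked below
  //   if (IsJSValue(arg) && StringWrapperSafeForDefaultValueOf(map))
  //     return JSValue::cast(arg)->value();             // unwrapped below
  //   goto slow;                                        // anything else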
  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ mov(Operand(esp, stack_offset), arg);
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
  __ j(not_equal, slow);
  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
            1 << Map::kStringWrapperSafeForDefaultValueOf);
  __ j(zero, slow);
  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
  __ mov(Operand(esp, stack_offset), arg);

  __ bind(&done);
}


void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          bool ascii) {
  Label loop;
  __ bind(&loop);
  // This loop just copies one character at a time, as it is only used for
  // very short strings.
  if (ascii) {
    __ mov_b(scratch, Operand(src, 0));
    __ mov_b(Operand(dest, 0), scratch);
    __ add(src, Immediate(1));
    __ add(dest, Immediate(1));
  } else {
    __ mov_w(scratch, Operand(src, 0));
    __ mov_w(Operand(dest, 0), scratch);
    __ add(src, Immediate(2));
    __ add(dest, Immediate(2));
  }
  __ sub(count, Immediate(1));
  __ j(not_zero, &loop);
}


void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
                                             Register dest,
                                             Register src,
                                             Register count,
                                             Register scratch,
                                             bool ascii) {
  // Copy characters using rep movs of doublewords.
  // The destination is aligned on a 4 byte boundary because we are
  // copying to the beginning of a newly allocated string.
  ASSERT(dest.is(edi));  // rep movs destination
  ASSERT(src.is(esi));  // rep movs source
  ASSERT(count.is(ecx));  // rep movs count
  ASSERT(!scratch.is(dest));
  ASSERT(!scratch.is(src));
  ASSERT(!scratch.is(count));

  // Nothing to do for zero characters.
  Label done;
  __ test(count, count);
  __ j(zero, &done);

  // Make count the number of bytes to copy.
  if (!ascii) {
    __ shl(count, 1);
  }

  // Don't enter the rep movs if there are fewer than 4 bytes to copy.
  Label last_bytes;
  __ test(count, Immediate(~3));
  __ j(zero, &last_bytes, Label::kNear);

  // Copy from esi to edi using the rep movs instruction.
  __ mov(scratch, count);
  __ sar(count, 2);  // Number of doublewords to copy.
  __ cld();
  __ rep_movs();

  // Find number of bytes left.
  __ mov(count, scratch);
  __ and_(count, 3);

  // Check if there are more bytes to copy.
  __ bind(&last_bytes);
  __ test(count, count);
  __ j(zero, &done);

  // Copy remaining characters.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(src, 0));
  __ mov_b(Operand(dest, 0), scratch);
  __ add(src, Immediate(1));
  __ add(dest, Immediate(1));
  __ sub(count, Immediate(1));
  __ j(not_zero, &loop);

  __ bind(&done);
}

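
// For illustration only (an assumption-level C sketch of the copy strategy
// generated above; CopyBytes is hypothetical and not part of V8):
//
//   static void CopyBytes(uint8_t* dest, const uint8_t* src,
//                         size_t count, bool ascii) {
//     size_t bytes = ascii ? count : count * 2;
//     if (bytes == 0) return;
//     size_t dwords = bytes >> 2;    // bulk copy: rep movsd
//     while (dwords--) { memcpy(dest, src, 4); dest += 4; src += 4; }
//     size_t rest = bytes & 3;       // tail: at most 3 single bytes
//     while (rest--) *dest++ = *src++;
//   }
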
void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Label* not_probed,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits, as strings of digits use
  // a different hash algorithm. Don't try to look for these in the string
  // table.
  Label not_array_index;
  __ mov(scratch, c1);
  __ sub(scratch, Immediate(static_cast<int>('0')));
  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
  __ j(above, &not_array_index, Label::kNear);
  __ mov(scratch, c2);
  __ sub(scratch, Immediate(static_cast<int>('0')));
  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
  __ j(below_equal, not_probed);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  GenerateHashInit(masm, hash, c1, scratch);
  GenerateHashAddCharacter(masm, hash, c2, scratch);
  GenerateHashGetHash(masm, hash, scratch);

  // Collect the two characters in a register.
  Register chars = c1;
  __ shl(c2, kBitsPerByte);
  __ or_(chars, c2);

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash: hash of two character string.

  // Load the string table.
  Register string_table = c2;
  __ LoadRoot(string_table, Heap::kStringTableRootIndex);

  // Calculate capacity mask from the string table capacity.
  Register mask = scratch2;
  __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(mask, Immediate(1));

  // Registers
  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash: hash of two character string
  // string_table: string table
  // mask: capacity mask
  // scratch: -

  // Perform a number of probes in the string table.
  static const int kProbes = 4;
  Label found_in_string_table;
  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
  Register candidate = scratch;  // Scratch register contains candidate.
  for (int i = 0; i < kProbes; i++) {
    // Calculate entry in string table.
    __ mov(scratch, hash);
    if (i > 0) {
      __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
    }
    __ and_(scratch, mask);

    // Load the entry from the string table.
    STATIC_ASSERT(StringTable::kEntrySize == 1);
    __ mov(candidate,
           FieldOperand(string_table,
                        scratch,
                        times_pointer_size,
                        StringTable::kElementsStartOffset));

    // If entry is undefined no string with this hash can be found.
    Factory* factory = masm->isolate()->factory();
    __ cmp(candidate, factory->undefined_value());
    __ j(equal, not_found);
    __ cmp(candidate, factory->the_hole_value());
    __ j(equal, &next_probe[i]);

    // If length is not 2 the string is not a candidate.
    __ cmp(FieldOperand(candidate, String::kLengthOffset),
           Immediate(Smi::FromInt(2)));
    __ j(not_equal, &next_probe[i]);

    // As we are out of registers save the mask on the stack and use that
    // register as a temporary.
    __ push(mask);
    Register temp = mask;

    // Check that the candidate is a non-external ASCII string.
    __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ JumpIfInstanceTypeIsNotSequentialAscii(
        temp, temp, &next_probe_pop_mask[i]);

    // Check if the two characters match.
    __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
    __ and_(temp, 0x0000ffff);
    __ cmp(chars, temp);
    __ j(equal, &found_in_string_table);
    __ bind(&next_probe_pop_mask[i]);
    __ pop(mask);
    __ bind(&next_probe[i]);
  }

  // No matching 2 character string found by probing.
  __ jmp(not_found);

  // Scratch register contains result when we fall through to here.
  Register result = candidate;
  __ bind(&found_in_string_table);
  __ pop(mask);  // Pop saved mask from the stack.
  if (!result.is(eax)) {
    __ mov(eax, result);
  }
}


void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character,
                                    Register scratch) {
  // hash = (seed + character) + ((seed + character) << 10);
  if (Serializer::enabled()) {
    __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
    __ SmiUntag(scratch);
    __ add(scratch, character);
    __ mov(hash, scratch);
    __ shl(scratch, 10);
    __ add(hash, scratch);
  } else {
    int32_t seed = masm->isolate()->heap()->HashSeed();
    __ lea(scratch, Operand(character, seed));
    __ shl(scratch, 10);
    __ lea(hash, Operand(scratch, character, times_1, seed));
  }
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ shr(scratch, 6);
  __ xor_(hash, scratch);
}


void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character,
                                            Register scratch) {
  // hash += character;
  __ add(hash, character);
  // hash += hash << 10;
  __ mov(scratch, hash);
  __ shl(scratch, 10);
  __ add(hash, scratch);
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ shr(scratch, 6);
  __ xor_(hash, scratch);
}


void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash,
                                       Register scratch) {
  // hash += hash << 3;
  __ mov(scratch, hash);
  __ shl(scratch, 3);
  __ add(hash, scratch);
  // hash ^= hash >> 11;
  __ mov(scratch, hash);
  __ shr(scratch, 11);
  __ xor_(hash, scratch);
  // hash += hash << 15;
  __ mov(scratch, hash);
  __ shl(scratch, 15);
  __ add(hash, scratch);

  __ and_(hash, String::kHashBitMask);

  // if (hash == 0) hash = 27;
  Label hash_not_zero;
  __ j(not_zero, &hash_not_zero, Label::kNear);
  __ mov(hash, Immediate(StringHasher::kZeroHash));
  __ bind(&hash_not_zero);
}


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: to
  //  esp[8]: from
  //  esp[12]: string

  // Make sure first argument is a string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
  __ j(NegateCondition(is_string), &runtime);

  // eax: string
  // ebx: instance type

  // Calculate length of sub string using the smi values.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
  __ JumpIfNotSmi(ecx, &runtime);
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
  __ JumpIfNotSmi(edx, &runtime);
  __ sub(ecx, edx);
  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
  Label not_original_string;
  // Shorter than original string's length: an actual substring.
  __ j(below, &not_original_string, Label::kNear);
  // Longer than original string's length or negative: unsafe arguments.
  __ j(above, &runtime);
  // Return the original string.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);
  __ bind(&not_original_string);

  Label single_char;
  __ cmp(ecx, Immediate(Smi::FromInt(1)));
  __ j(equal, &single_char);

  // eax: string
  // ebx: instance type
  // ecx: sub string length (smi)
  // edx: from index (smi)
  // Deal with different string types: update the index if necessary
  // and put the underlying string into edi.
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);
  __ test(ebx, Immediate(kIsIndirectStringMask));
  __ j(zero, &seq_or_external_string, Label::kNear);

  Factory* factory = masm->isolate()->factory();
  __ test(ebx, Immediate(kSlicedNotConsMask));
  __ j(not_zero, &sliced_string, Label::kNear);
  // Cons string. Check whether it is flat, then fetch the first part.
  // Flat cons strings have an empty second part.
  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
         factory->empty_string());
  __ j(not_equal, &runtime);
  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
  // Update the instance type.
  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked, Label::kNear);

  __ bind(&sliced_string);
  // Sliced string. Fetch the parent and adjust the start index by the offset.
  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
  // Update the instance type.
  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked, Label::kNear);

  __ bind(&seq_or_external_string);
  // Sequential or external string. Just move the string to the expected
  // register.
  __ mov(edi, eax);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // edi: underlying subject string
    // ebx: instance type of underlying subject string
    // edx: adjusted start index (smi)
    // ecx: length (smi)
    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
    // Short slice. Copy instead of slicing.
    __ j(less, &copy_routine);
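    // For reference (assumption-level; names follow the SlicedString
    // offsets used below): a sliced string is a window into a parent string
    // rather than a copy.
    //
    //   SlicedString {
    //     length     <- kLengthOffset
    //     hash_field <- kHashFieldOffset
    //     parent     <- kParentOffset (underlying string)
    //     offset     <- kOffsetOffset (start index within parent, a smi)
    //   }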
    // Allocate a new sliced string. At this point we do not reload the
    // instance type including the string encoding because we simply rely on
    // the info provided by the original string. It does not matter if the
    // original string's encoding is wrong because we always have to recheck
    // the encoding of the newly created string's parent anyway, due to
    // externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ test(ebx, Immediate(kStringEncodingMask));
    __ j(zero, &two_byte_slice, Label::kNear);
    __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
    __ jmp(&set_slice_header, Label::kNear);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
    __ bind(&set_slice_header);
    __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
    __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
           Immediate(String::kEmptyHashField));
    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
    __ IncrementCounter(counters->sub_string_native(), 1);
    __ ret(3 * kPointerSize);

    __ bind(&copy_routine);
  }

  // edi: underlying subject string
  // ebx: instance type of underlying subject string
  // edx: adjusted start index (smi)
  // ecx: length (smi)
  // The subject string can only be an external or sequential string of
  // either encoding at this point.
  Label two_byte_sequential, runtime_drop_two, sequential_string;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test_b(ebx, kExternalStringTag);
  __ j(zero, &sequential_string);

  // Handle external string.
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ test_b(ebx, kShortExternalStringMask);
  __ j(not_zero, &runtime);
  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  __ bind(&sequential_string);
  // Stash away (adjusted) index and (underlying) string.
  __ push(edx);
  __ push(edi);
  __ SmiUntag(ecx);
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ test_b(ebx, kStringEncodingMask);
  __ j(zero, &two_byte_sequential);

  // Sequential ASCII string. Allocate the result.
  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ pop(esi);
  __ pop(ebx);
  __ SmiUntag(ebx);
  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
  __ mov(esi, edx);  // Restore esi.
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  __ bind(&two_byte_sequential);
  // Sequential two-byte string. Allocate the result.
  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(edi,
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ pop(esi);
  __ pop(ebx);
  // As from is a smi it is 2 times the value, which matches the size of a
  // two-byte character.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
  __ mov(esi, edx);  // Restore esi.
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  // Drop pushed values on the stack before tail call.
  __ bind(&runtime_drop_two);
  __ Drop(2);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);

  __ bind(&single_char);
  // eax: string
  // ebx: instance type
  // ecx: sub string length (smi)
  // edx: from index (smi)
  StringCharAtGenerator generator(
      eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
  generator.GenerateFast(masm);
  __ ret(3 * kPointerSize);
  generator.SkipSlow(masm, &runtime);
}


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ mov(length, FieldOperand(left, String::kLengthOffset));
  __ cmp(length, FieldOperand(right, String::kLengthOffset));
  __ j(equal, &check_zero_length, Label::kNear);
  __ bind(&strings_not_equal);
  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
  __ ret(0);

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ test(length, length);
  __ j(not_zero, &compare_chars, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Compare characters.
  __ bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
                                &strings_not_equal, Label::kNear);

  // Characters are equal.
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3) {
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_compare_native(), 1);
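  // For illustration (assumption-level pseudocode of the comparison below):
  //
  //   min_length = min(left.length, right.length);
  //   for (i = 0; i < min_length; i++) {
  //     if (left[i] != right[i])
  //       return left[i] < right[i] ? LESS : GREATER;
  //   }
  //   return sign(left.length - right.length);  // LESS, EQUAL or GREATER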
  // Find the minimum length.
  Label left_shorter;
  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
  __ mov(scratch3, scratch1);
  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));

  Register length_delta = scratch3;

  __ j(less_equal, &left_shorter, Label::kNear);
  // Right string is shorter. Change scratch1 to be length of right string.
  __ sub(scratch1, length_delta);
  __ bind(&left_shorter);

  Register min_length = scratch1;

  // If either length is zero, just compare lengths.
  Label compare_lengths;
  __ test(min_length, min_length);
  __ j(zero, &compare_lengths, Label::kNear);

  // Compare characters.
  Label result_not_equal;
  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
                                &result_not_equal, Label::kNear);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  __ test(length_delta, length_delta);
  Label length_not_equal;
  __ j(not_zero, &length_not_equal, Label::kNear);

  // Result is EQUAL.
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  Label result_greater;
  Label result_less;
  __ bind(&length_not_equal);
  __ j(greater, &result_greater, Label::kNear);
  __ jmp(&result_less, Label::kNear);
  __ bind(&result_not_equal);
  __ j(above, &result_greater, Label::kNear);
  __ bind(&result_less);

  // Result is LESS.
  __ Set(eax, Immediate(Smi::FromInt(LESS)));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
  __ ret(0);
}


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch,
    Label* chars_not_equal,
    Label::Distance chars_not_equal_near) {
  // Change the index to run from -length to -1 by adding length to the
  // string start. This means that the loop ends when the index reaches zero,
  // which doesn't need an additional compare.
  __ SmiUntag(length);
  __ lea(left,
         FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
  __ lea(right,
         FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
  __ neg(length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(left, index, times_1, 0));
  __ cmpb(scratch, Operand(right, index, times_1, 0));
  __ j(not_equal, chars_not_equal, chars_not_equal_near);
  __ inc(index);
  __ j(not_zero, &loop);
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: right string
  //  esp[8]: left string

  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right

  Label not_same;
  __ cmp(edx, eax);
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both objects are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat ASCII strings.
  // Drop arguments from the stack.
  __ pop(ecx);
  __ add(esp, Immediate(2 * kPointerSize));
  __ push(ecx);
  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMI);
  Label miss;
  __ mov(ecx, edx);
  __ or_(ecx, eax);
  __ JumpIfNotSmi(ecx, &miss, Label::kNear);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ sub(eax, edx);
  } else {
    Label done;
    __ sub(edx, eax);
    __ j(no_overflow, &done, Label::kNear);
    // Correct sign of result in case of overflow.
    __ not_(edx);
    __ bind(&done);
    __ mov(eax, edx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::NUMBER);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left_ == CompareIC::SMI) {
    __ JumpIfNotSmi(edx, &miss);
  }
  if (right_ == CompareIC::SMI) {
    __ JumpIfNotSmi(eax, &miss);
  }

  // Inline the double comparison, falling back to the generic compare stub
  // if NaN is involved or SSE2 or CMOV is unsupported.
  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
    CpuFeatureScope scope1(masm, SSE2);
    CpuFeatureScope scope2(masm, CMOV);

    // Load left and right operand.
    Label done, left, left_smi, right_smi;
    __ JumpIfSmi(eax, &right_smi, Label::kNear);
    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
           masm->isolate()->factory()->heap_number_map());
    __ j(not_equal, &maybe_undefined1, Label::kNear);
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
    __ jmp(&left, Label::kNear);
    __ bind(&right_smi);
    __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
    __ SmiUntag(ecx);
    __ cvtsi2sd(xmm1, ecx);

    __ bind(&left);
    __ JumpIfSmi(edx, &left_smi, Label::kNear);
    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
           masm->isolate()->factory()->heap_number_map());
    __ j(not_equal, &maybe_undefined2, Label::kNear);
    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
    __ jmp(&done);
    __ bind(&left_smi);
    __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
    __ SmiUntag(ecx);
    __ cvtsi2sd(xmm0, ecx);

    __ bind(&done);
    // Compare operands.
    __ ucomisd(xmm0, xmm1);

    // Don't base result on EFLAGS when a NaN is involved.
    __ j(parity_even, &unordered, Label::kNear);

    // Return a result of -1, 0, or 1, based on EFLAGS.
    // Use mov, because xor would destroy the flags we still need for cmov.
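    // Roughly the following branchless selection (illustrative; xmm0 holds
    // the left operand, xmm1 the right):
    //
    //   result = 0;                           // assume equal
    //   if (left > right) result = Smi(1);    // GREATER
    //   if (left < right) result = Smi(-1);   // LESS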
    __ mov(eax, 0);  // equal
    __ mov(ecx, Immediate(Smi::FromInt(1)));
    __ cmov(above, eax, ecx);
    __ mov(ecx, Immediate(Smi::FromInt(-1)));
    __ cmov(below, eax, ecx);
    __ ret(0);
  } else {
    __ mov(ecx, edx);
    __ and_(ecx, eax);
    __ JumpIfSmi(ecx, &generic_stub, Label::kNear);

    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
           masm->isolate()->factory()->heap_number_map());
    __ j(not_equal, &maybe_undefined1, Label::kNear);
    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
           masm->isolate()->factory()->heap_number_map());
    __ j(not_equal, &maybe_undefined2, Label::kNear);
  }

  __ bind(&unordered);
  __ bind(&generic_stub);
  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
                     CompareIC::GENERIC);
  __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
    __ j(not_equal, &miss);
    __ JumpIfSmi(edx, &unordered);
    __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
    __ j(not_equal, &maybe_undefined2, Label::kNear);
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
    __ j(equal, &unordered);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
  ASSERT(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;

  // Check that both operands are heap objects.
  Label miss;
  __ mov(tmp1, left);
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, right);
  __ JumpIfSmi(tmp1, &miss, Label::kNear);

  // Check that both operands are internalized strings.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ or_(tmp1, tmp2);
  __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  __ j(not_zero, &miss, Label::kNear);

  // Internalized strings are compared by identity.
  Label done;
  __ cmp(left, right);
  // Make sure eax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(eax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::UNIQUE_NAME);
  ASSERT(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;

  // Check that both operands are heap objects.
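  // Combining the operands with 'and' lets one smi test cover both: the smi
  // tag is the low bit (0 for smis), so (left & right) looks like a smi
  // exactly when at least one of the two operands is a smi.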
  Label miss;
  __ mov(tmp1, left);
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, right);
  __ JumpIfSmi(tmp1, &miss, Label::kNear);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));

  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);

  // Unique names are compared by identity.
  Label done;
  __ cmp(left, right);
  // Make sure eax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(eax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op_);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;
  Register tmp3 = edi;

  // Check that both operands are heap objects.
  __ mov(tmp1, left);
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, right);
  __ JumpIfSmi(tmp1, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ mov(tmp3, tmp1);
  STATIC_ASSERT(kNotStringTag != 0);
  __ or_(tmp3, tmp2);
  __ test(tmp3, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Fast check for identical strings.
  Label not_same;
  __ cmp(left, right);
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Handle not identical strings.
  __ bind(&not_same);

  // Check that both strings are internalized. If they are, we're done
  // because we already know they are not identical. But in the case of
  // a non-equality compare, we still need to determine the order. We
  // also know they are both strings.
  if (equality) {
    Label do_compare;
    STATIC_ASSERT(kInternalizedTag == 0);
    __ or_(tmp1, tmp2);
    __ test(tmp1, Immediate(kIsNotInternalizedMask));
    __ j(not_zero, &do_compare, Label::kNear);
    // Make sure eax is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    ASSERT(right.is(eax));
    __ ret(0);
    __ bind(&do_compare);
  }

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);

  // Compare flat ASCII strings. Returns when done.
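  // Both helpers end with a ret: the equality helper leaves a smi
  // EQUAL/NOT_EQUAL in eax, the ordering helper LESS/EQUAL/GREATER, so the
  // code below is only reached through the |runtime| label.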
  if (equality) {
    StringCompareStub::GenerateFlatAsciiStringEquals(
        masm, left, right, tmp1, tmp2);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(
        masm, left, right, tmp1, tmp2, tmp3);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ pop(tmp1);  // Return address.
  __ push(left);
  __ push(right);
  __ push(tmp1);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECT);
  Label miss;
  __ mov(ecx, edx);
  __ and_(ecx, eax);
  __ JumpIfSmi(ecx, &miss, Label::kNear);

  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);

  ASSERT(GetCondition() == equal);
  __ sub(eax, edx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  __ mov(ecx, edx);
  __ and_(ecx, eax);
  __ JumpIfSmi(ecx, &miss, Label::kNear);

  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
  __ cmp(ecx, known_map_);
  __ j(not_equal, &miss, Label::kNear);
  __ cmp(ebx, known_map_);
  __ j(not_equal, &miss, Label::kNear);

  __ sub(eax, edx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                               masm->isolate());
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(edx);  // Preserve edx and eax.
    __ push(eax);
    __ push(edx);  // And also use them as the arguments.
    __ push(eax);
    __ push(Immediate(Smi::FromInt(op_)));
    __ CallExternalReference(miss, 3);
    // Compute the entry point of the rewritten stub.
    __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
    __ pop(eax);
    __ pop(edx);
  }

  // Do a tail call to the rewritten stub.
  __ jmp(edi);
}


// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so the |miss|
// label must always lead to a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// |name| must be a unique name and the receiver must be a heap object.
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register r0) {
  ASSERT(name->IsUniqueName());

  // If the names of the slots probed for the hash value are not equal to the
  // name, and the kProbes-th slot is unused (its name is the undefined
  // value), the hash table is guaranteed not to contain the property. This
  // holds even if some slots represent deleted properties (their names are
  // the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
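    // Quadratic probing: each probe adds a quadratically growing offset
    // (NameDictionary::GetProbeOffset(i)) to the hash. Because the capacity
    // is a power of two, the 'and' with (capacity - 1) below reduces the
    // probe position modulo the capacity without a division; e.g. with
    // capacity 8 the mask is 7, and (hash + offset) & 7 == (hash + offset) % 8.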
    Register index = r0;
    // Capacity is a smi and always a power of two (2^n).
    __ mov(index, FieldOperand(properties, kCapacityOffset));
    __ dec(index);
    __ and_(index,
            Immediate(Smi::FromInt(name->Hash() +
                                   NameDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
    Register entity_name = r0;
    // Finding undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                kElementsStartOffset - kHeapObjectTag));
    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if we found the property.
    __ cmp(entity_name, Handle<Name>(name));
    __ j(equal, miss);

    Label good;
    // Check for the hole and skip.
    __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
    __ j(equal, &good, Label::kNear);

    // Check if the entry name is not a unique name.
    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
                           miss);
    __ bind(&good);
  }

  NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
  __ push(Immediate(Handle<Object>(name)));
  __ push(Immediate(name->Hash()));
  __ CallStub(&stub);
  __ test(r0, r0);
  __ j(not_zero, miss);
  __ jmp(done);
}


// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
// otherwise.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register elements,
                                                      Register name,
                                                      Register r0,
                                                      Register r1) {
  ASSERT(!elements.is(r0));
  ASSERT(!elements.is(r1));
  ASSERT(!name.is(r0));
  ASSERT(!name.is(r1));

  __ AssertName(name);

  __ mov(r1, FieldOperand(elements, kCapacityOffset));
  __ shr(r1, kSmiTagSize);  // Convert smi to int.
  __ dec(r1);

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
    __ shr(r0, Name::kHashShift);
    if (i > 0) {
      __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
    }
    __ and_(r0, r1);

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3

    // Check if the key is identical to the name.
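    // Each dictionary entry occupies kEntrySize (3) consecutive pointers:
    // key, value and property details. The probe index was tripled above to
    // turn it into an element index; the times_4 scale below then converts
    // elements into a byte offset (kPointerSize == 4 on ia32).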
    __ cmp(name, Operand(elements,
                         r0,
                         times_4,
                         kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  NameDictionaryLookupStub stub(elements, r1, r0, POSITIVE_LOOKUP);
  __ push(name);
  __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
  __ shr(r0, Name::kHashShift);
  __ push(r0);
  __ CallStub(&stub);

  __ test(r1, r1);
  __ j(zero, miss);
  __ jmp(done);
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // Stack frame on entry:
  //  esp[0 * kPointerSize]: return address.
  //  esp[1 * kPointerSize]: key's hash.
  //  esp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: NameDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold an index of entry if lookup is successful.
  //          might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non-zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  Register scratch = result_;

  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
  __ dec(scratch);
  __ SmiUntag(scratch);
  __ push(scratch);

  // If the names of the slots probed for the hash value are not equal to the
  // name and the last probed slot is unused (its name is the undefined
  // value), the hash table is guaranteed not to contain the property. This
  // holds even if some slots represent deleted properties (their names are
  // the hole value).
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(scratch, Operand(esp, 2 * kPointerSize));
    if (i > 0) {
      __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(esp, 0));

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // Finding undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    __ mov(scratch, Operand(dictionary_,
                            index_,
                            times_pointer_size,
                            kElementsStartOffset - kHeapObjectTag));
    __ cmp(scratch, masm->isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if we found the property.
    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // If we hit a key that is not a unique name during negative
      // lookup we have to bail out as this key might be equal to the
      // key we are looking for.

      // Check if the entry name is not a unique name.
      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
                             &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup then probing failure should be
  // treated as a lookup success. For a positive lookup probing failure
  // should be treated as lookup failure.
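  // All three exits below share the same epilogue: Drop(1) removes the
  // probe mask pushed above, and ret(2 * kPointerSize) pops the hash and
  // key arguments on return.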
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result_, Immediate(0));
    __ Drop(1);
    __ ret(2 * kPointerSize);
  }

  __ bind(&in_dictionary);
  __ mov(result_, Immediate(1));
  __ Drop(1);
  __ ret(2 * kPointerSize);

  __ bind(&not_in_dictionary);
  __ mov(result_, Immediate(0));
  __ Drop(1);
  __ ret(2 * kPointerSize);
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal and CallFunctionStub.
  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField and
  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
  // GenerateStoreField calls the stub with two different permutations of
  // registers. This is the second.
  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
  // StoreIC::GenerateNormal via GenerateDictionaryStore.
  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
  // KeyedStoreIC::GenerateGeneric.
  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET },
  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
  // and ElementsTransitionGenerator::GenerateSmiToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET },
  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET },
  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET },
  // FastNewClosureStub and StringAddStub::Generate
  { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET },
  // StringAddStub::Generate
  { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
};

#undef REG


bool RecordWriteStub::IsPregenerated() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub(kDontSaveFPRegs);
  stub.GetCode(isolate)->set_is_pregenerated(true);
  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    StoreBufferOverflowStub stub2(kSaveFPRegs);
    stub2.GetCode(isolate)->set_is_pregenerated(true);
  }
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode(isolate)->set_is_pregenerated(true);
  }
}


bool CodeStub::CanUseFPRegisters() {
  return CpuFeatures::IsSupported(SSE2);
}


// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, and now this stub makes
// sure we keep the GC informed. The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call. We patch them back
  // and forth between nops (in this position) and the real branches when we
  // start and stop incremental heap marking.
  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
  __ jmp(&skip_to_incremental_compacting, Label::kFar);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  masm->set_byte_at(0, kTwoByteNopInstruction);
  masm->set_byte_at(2, kFiveByteNopInstruction);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     not_zero,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm,
        kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
        mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm,
      kReturnOnNoNeedToInformIncrementalMarker,
      mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ ret(0);
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
  __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
  __ mov(Operand(esp, 2 * kPointerSize),
         Immediate(ExternalReference::isolate_address(masm->isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label object_is_black, need_incremental, need_incremental_pop_object;

  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
  __ and_(regs_.scratch0(), regs_.object());
  __ mov(regs_.scratch1(),
         Operand(regs_.scratch0(),
                 MemoryChunk::kWriteBarrierCounterOffset));
  __ sub(regs_.scratch1(), Immediate(1));
  __ mov(Operand(regs_.scratch0(),
                 MemoryChunk::kWriteBarrierCounterOffset),
         regs_.scratch1());
  __ j(negative, &need_incremental);

  // Let's look at the color of the object: if it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(),
                 regs_.scratch0(),
                 regs_.scratch1(),
                 &object_is_black,
                 Label::kNear);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&object_is_black);

  // Get the value from the slot.
  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     zero,
                     &ensure_not_white,
                     Label::kNear);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     not_zero,
                     &ensure_not_white,
                     Label::kNear);

    __ jmp(&need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need an extra register for this, so we push the object register
  // temporarily.
  __ push(regs_.object());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    &need_incremental_pop_object,
                    Label::kNear);
  __ pop(regs_.object());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&need_incremental_pop_object);
  __ pop(regs_.object());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : element value to store
  //  -- ecx    : element index as smi
  //  -- esp[0] : return address
  //  -- esp[4] : array literal index in function
  //  -- esp[8] : array literal
  //  clobbers ebx, edx, edi
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label slow_elements_from_double;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));

  __ CheckFastElements(edi, &double_elements);

  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements.
  __ JumpIfSmi(eax, &smi_element);
  __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);

  // Storing into the array literal requires an elements transition. Call
  // into the runtime.

  __ bind(&slow_elements);
  __ pop(edi);  // Pop the return address; it is pushed back below so the
                // tail call returns to the right place.
  __ push(ebx);
  __ push(ecx);
  __ push(eax);
  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
  __ push(edx);
  __ push(edi);  // Push the return address back.
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  __ bind(&slow_elements_from_double);
  __ pop(edx);
  __ jmp(&slow_elements);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
                           FixedArrayBase::kHeaderSize));
  __ mov(Operand(ecx, 0), eax);
  // Update the write barrier for the array store.
  __ RecordWrite(ebx, ecx, eax,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ ret(0);
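
  // A smi store needs no write barrier: the GC never traces smis, so the
  // plain field store in the smi_element case below is sufficient.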
  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is a smi.
  __ bind(&smi_element);
  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
                      FixedArrayBase::kHeaderSize), eax);
  __ ret(0);

  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
  __ bind(&double_elements);

  __ push(edx);
  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(eax,
                                 edx,
                                 ecx,
                                 edi,
                                 xmm0,
                                 &slow_elements_from_double,
                                 false);
  __ pop(edx);
  __ ret(0);
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
  __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ mov(ebx, MemOperand(ebp, parameter_count_offset));
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ pop(ecx);
  int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
      ? kPointerSize
      : 0;
  __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
  __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    // It's always safe to call the entry hook stub, as the hook itself
    // is not allowed to call back to V8.
    AllowStubCallsScope allow_stub_calls(masm, true);

    ProfileEntryHookStub stub;
    masm->CallStub(&stub);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // Save volatile registers.
  const int kNumSavedRegisters = 3;
  __ push(eax);
  __ push(ecx);
  __ push(edx);

  // Calculate and push the original stack pointer.
  __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
  __ push(eax);

  // Retrieve our return address and use it to calculate the calling
  // function's address.
  __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
  __ push(eax);

  // Call the entry hook.
  ASSERT(masm->isolate()->function_entry_hook() != NULL);
  __ call(FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
          RelocInfo::RUNTIME_ENTRY);
  __ add(esp, Immediate(2 * kPointerSize));

  // Restore the volatile registers.
  __ pop(edx);
  __ pop(ecx);
  __ pop(eax);

  __ ret(0);
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm) {
  int last_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= last_index; ++i) {
    Label next;
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    __ cmp(edx, kind);
    __ j(not_equal, &next);
    T stub(kind);
    __ TailCallStub(&stub);
    __ bind(&next);
  }

  // If we reached this point there is a problem.
  __ Abort(kUnexpectedElementsKindInArrayConstructor);
}
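

// CreateArrayDispatchOneArgument (below) may upgrade the requested elements
// kind to its holey variant before dispatching. The fast elements kinds are
// numbered so that kind + 1 is the holey version of kind (see the ASSERTs
// below), which is why a single 'inc' is enough to do the transition.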
static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
  // ebx - type info cell
  // edx - kind
  // eax - number of arguments
  // edi - constructor?
  // esp[0] - return address
  // esp[4] - last argument
  ASSERT(FAST_SMI_ELEMENTS == 0);
  ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  ASSERT(FAST_ELEMENTS == 2);
  ASSERT(FAST_HOLEY_ELEMENTS == 3);
  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

  Handle<Object> undefined_sentinel(
      masm->isolate()->heap()->undefined_value(),
      masm->isolate());

  // Is the low bit set? If so, we are holey and that is good.
  __ test_b(edx, 1);
  Label normal_sequence;
  __ j(not_zero, &normal_sequence);

  // Look at the first argument.
  __ mov(ecx, Operand(esp, kPointerSize));
  __ test(ecx, ecx);
  __ j(zero, &normal_sequence);

  // We are going to create a holey array, but our kind is non-holey.
  // Fix kind and retry (only if we have an allocation site in the cell).
  __ inc(edx);
  __ cmp(ebx, Immediate(undefined_sentinel));
  __ j(equal, &normal_sequence);
  __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
  Handle<Map> allocation_site_map(
      masm->isolate()->heap()->allocation_site_map(),
      masm->isolate());
  __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
  __ j(not_equal, &normal_sequence);

  // Save the resulting elements kind in the type info cell.
  __ SmiTag(edx);
  __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
  __ SmiUntag(edx);

  __ bind(&normal_sequence);
  int last_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= last_index; ++i) {
    Label next;
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    __ cmp(edx, kind);
    __ j(not_equal, &next);
    ArraySingleArgumentConstructorStub stub(kind);
    __ TailCallStub(&stub);
    __ bind(&next);
  }

  // If we reached this point there is a problem.
  __ Abort(kUnexpectedElementsKindInArrayConstructor);
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(kind);
    stub.GetCode(isolate)->set_is_pregenerated(true);
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
      stub1.GetCode(isolate)->set_is_pregenerated(true);
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
    stubh1.GetCode(isolate)->set_is_pregenerated(true);
    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
    stubh2.GetCode(isolate)->set_is_pregenerated(true);
    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
    stubh3.GetCode(isolate)->set_is_pregenerated(true);
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : argc (only if argument_count_ == ANY)
  //  -- ebx    : type info cell
  //  -- edi    : constructor
  //  -- esp[0] : return address
  //  -- esp[4] : last argument
  // -----------------------------------
  Handle<Object> undefined_sentinel(
      masm->isolate()->heap()->undefined_value(),
      masm->isolate());

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
    // This test will catch both a NULL pointer and a smi.
    __ test(ecx, Immediate(kSmiTagMask));
    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
    __ CmpObjectType(ecx, MAP_TYPE, ecx);
    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);

    // We should have either undefined in ebx or a valid cell.
    Label okay_here;
    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
    __ cmp(ebx, Immediate(undefined_sentinel));
    __ j(equal, &okay_here);
    __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
    __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
    __ bind(&okay_here);
  }

  Label no_info, switch_ready;
  // Get the elements kind and case on that.
  __ cmp(ebx, Immediate(undefined_sentinel));
  __ j(equal, &no_info);
  __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));

  // The type cell may have undefined in its value.
  __ cmp(edx, Immediate(undefined_sentinel));
  __ j(equal, &no_info);

  // The type cell has either an AllocationSite or a JSFunction.
  __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
      masm->isolate()->heap()->allocation_site_map())));
  __ j(not_equal, &no_info);

  __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(edx);
  __ jmp(&switch_ready);
  __ bind(&no_info);
  __ mov(edx, Immediate(GetInitialFastElementsKind()));
  __ bind(&switch_ready);

  if (argument_count_ == ANY) {
    Label not_zero_case, not_one_case;
    __ test(eax, eax);
    __ j(not_zero, &not_zero_case);
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);

    __ bind(&not_zero_case);
    __ cmp(eax, 1);
    __ j(greater, &not_one_case);
    CreateArrayDispatchOneArgument(masm);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
  } else if (argument_count_ == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
  } else if (argument_count_ == ONE) {
    CreateArrayDispatchOneArgument(masm);
  } else if (argument_count_ == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
  } else {
    UNREACHABLE();
  }
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  Label not_zero_case, not_one_case;
  Label normal_sequence;

  __ test(eax, eax);
  __ j(not_zero, &not_zero_case);
  InternalArrayNoArgumentConstructorStub stub0(kind);
  __ TailCallStub(&stub0);

  __ bind(&not_zero_case);
  __ cmp(eax, 1);
  __ j(greater, &not_one_case);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ mov(ecx, Operand(esp, kPointerSize));
    __ test(ecx, ecx);
    __ j(zero, &normal_sequence);

    InternalArraySingleArgumentConstructorStub
        stub1_holey(GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey);
  }

  __ bind(&normal_sequence);
  InternalArraySingleArgumentConstructorStub stub1(kind);
  __ TailCallStub(&stub1);

  __ bind(&not_one_case);
  InternalArrayNArgumentsConstructorStub stubN(kind);
  __ TailCallStub(&stubN);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : argc
  //  -- ebx    : type info cell
  //  -- edi    : constructor
  //  -- esp[0] : return address
  //  -- esp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
    // This test will catch both a NULL pointer and a smi.
    __ test(ecx, Immediate(kSmiTagMask));
    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
    __ CmpObjectType(ecx, MAP_TYPE, ecx);
    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into ecx. We only need the first byte,
  // but the following masking takes care of that anyway.
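  // Roughly: elements_kind = (bit_field2 & kElementsKindMask)
  //                              >> kElementsKindShift;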
  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(ecx, Map::kElementsKindMask);
  __ shr(ecx, Map::kElementsKindShift);

  if (FLAG_debug_code) {
    Label done;
    __ cmp(ecx, Immediate(FAST_ELEMENTS));
    __ j(equal, &done);
    __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
    __ Assert(equal,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmp(ecx, Immediate(FAST_ELEMENTS));
  __ j(equal, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32