1 // Copyright 2011 the V8 project authors. All rights reserved. 2 // Redistribution and use in source and binary forms, with or without 3 // modification, are permitted provided that the following conditions are 4 // met: 5 // 6 // * Redistributions of source code must retain the above copyright 7 // notice, this list of conditions and the following disclaimer. 8 // * Redistributions in binary form must reproduce the above 9 // copyright notice, this list of conditions and the following 10 // disclaimer in the documentation and/or other materials provided 11 // with the distribution. 12 // * Neither the name of Google Inc. nor the names of its 13 // contributors may be used to endorse or promote products derived 14 // from this software without specific prior written permission. 15 // 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28 #include "v8.h" 29 30 #if defined(V8_TARGET_ARCH_IA32) 31 32 #include "code-stubs.h" 33 #include "bootstrapper.h" 34 #include "jsregexp.h" 35 #include "isolate.h" 36 #include "regexp-macro-assembler.h" 37 38 namespace v8 { 39 namespace internal { 40 41 #define __ ACCESS_MASM(masm) 42 43 void ToNumberStub::Generate(MacroAssembler* masm) { 44 // The ToNumber stub takes one argument in eax. 45 NearLabel check_heap_number, call_builtin; 46 __ test(eax, Immediate(kSmiTagMask)); 47 __ j(not_zero, &check_heap_number); 48 __ ret(0); 49 50 __ bind(&check_heap_number); 51 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 52 Factory* factory = masm->isolate()->factory(); 53 __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); 54 __ j(not_equal, &call_builtin); 55 __ ret(0); 56 57 __ bind(&call_builtin); 58 __ pop(ecx); // Pop return address. 59 __ push(eax); 60 __ push(ecx); // Push return address. 61 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); 62 } 63 64 65 void FastNewClosureStub::Generate(MacroAssembler* masm) { 66 // Create a new closure from the given function info in new 67 // space. Set the context to the current context in esi. 68 Label gc; 69 __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); 70 71 // Get the function info from the stack. 72 __ mov(edx, Operand(esp, 1 * kPointerSize)); 73 74 int map_index = strict_mode_ == kStrictMode 75 ? Context::STRICT_MODE_FUNCTION_MAP_INDEX 76 : Context::FUNCTION_MAP_INDEX; 77 78 // Compute the function map in the current global context and set that 79 // as the map of the allocated object. 80 __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); 81 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); 82 __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index))); 83 __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); 84 85 // Initialize the rest of the function. We don't have to update the 86 // write barrier because the allocated object is in new space. 87 Factory* factory = masm->isolate()->factory(); 88 __ mov(ebx, Immediate(factory->empty_fixed_array())); 89 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); 90 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); 91 __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), 92 Immediate(factory->the_hole_value())); 93 __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); 94 __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); 95 __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); 96 __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), 97 Immediate(factory->undefined_value())); 98 99 // Initialize the code pointer in the function to be the one 100 // found in the shared function info object. 101 __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); 102 __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); 103 __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx); 104 105 // Return and remove the on-stack parameter. 106 __ ret(1 * kPointerSize); 107 108 // Create a new closure through the slower runtime call. 109 __ bind(&gc); 110 __ pop(ecx); // Temporarily remove return address. 111 __ pop(edx); 112 __ push(esi); 113 __ push(edx); 114 __ push(Immediate(factory->false_value())); 115 __ push(ecx); // Restore return address. 116 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); 117 } 118 119 120 void FastNewContextStub::Generate(MacroAssembler* masm) { 121 // Try to allocate the context in new space. 122 Label gc; 123 int length = slots_ + Context::MIN_CONTEXT_SLOTS; 124 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, 125 eax, ebx, ecx, &gc, TAG_OBJECT); 126 127 // Get the function from the stack. 128 __ mov(ecx, Operand(esp, 1 * kPointerSize)); 129 130 // Setup the object header. 131 Factory* factory = masm->isolate()->factory(); 132 __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->context_map()); 133 __ mov(FieldOperand(eax, Context::kLengthOffset), 134 Immediate(Smi::FromInt(length))); 135 136 // Setup the fixed slots. 137 __ Set(ebx, Immediate(0)); // Set to NULL. 138 __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); 139 __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax); 140 __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx); 141 __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx); 142 143 // Copy the global object from the surrounding context. We go through the 144 // context in the function (ecx) to match the allocation behavior we have 145 // in the runtime system (see Heap::AllocateFunctionContext). 146 __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset)); 147 __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX))); 148 __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx); 149 150 // Initialize the rest of the slots to undefined. 151 __ mov(ebx, factory->undefined_value()); 152 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { 153 __ mov(Operand(eax, Context::SlotOffset(i)), ebx); 154 } 155 156 // Return and remove the on-stack parameter. 157 __ mov(esi, Operand(eax)); 158 __ ret(1 * kPointerSize); 159 160 // Need to collect. Call into runtime system. 161 __ bind(&gc); 162 __ TailCallRuntime(Runtime::kNewContext, 1, 1); 163 } 164 165 166 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { 167 // Stack layout on entry: 168 // 169 // [esp + kPointerSize]: constant elements. 170 // [esp + (2 * kPointerSize)]: literal index. 171 // [esp + (3 * kPointerSize)]: literals array. 172 173 // All sizes here are multiples of kPointerSize. 174 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; 175 int size = JSArray::kSize + elements_size; 176 177 // Load boilerplate object into ecx and check if we need to create a 178 // boilerplate. 179 Label slow_case; 180 __ mov(ecx, Operand(esp, 3 * kPointerSize)); 181 __ mov(eax, Operand(esp, 2 * kPointerSize)); 182 STATIC_ASSERT(kPointerSize == 4); 183 STATIC_ASSERT(kSmiTagSize == 1); 184 STATIC_ASSERT(kSmiTag == 0); 185 __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size, 186 FixedArray::kHeaderSize)); 187 Factory* factory = masm->isolate()->factory(); 188 __ cmp(ecx, factory->undefined_value()); 189 __ j(equal, &slow_case); 190 191 if (FLAG_debug_code) { 192 const char* message; 193 Handle<Map> expected_map; 194 if (mode_ == CLONE_ELEMENTS) { 195 message = "Expected (writable) fixed array"; 196 expected_map = factory->fixed_array_map(); 197 } else { 198 ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); 199 message = "Expected copy-on-write fixed array"; 200 expected_map = factory->fixed_cow_array_map(); 201 } 202 __ push(ecx); 203 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); 204 __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); 205 __ Assert(equal, message); 206 __ pop(ecx); 207 } 208 209 // Allocate both the JS array and the elements array in one big 210 // allocation. This avoids multiple limit checks. 211 __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); 212 213 // Copy the JS array part. 214 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { 215 if ((i != JSArray::kElementsOffset) || (length_ == 0)) { 216 __ mov(ebx, FieldOperand(ecx, i)); 217 __ mov(FieldOperand(eax, i), ebx); 218 } 219 } 220 221 if (length_ > 0) { 222 // Get hold of the elements array of the boilerplate and setup the 223 // elements pointer in the resulting object. 224 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); 225 __ lea(edx, Operand(eax, JSArray::kSize)); 226 __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); 227 228 // Copy the elements array. 229 for (int i = 0; i < elements_size; i += kPointerSize) { 230 __ mov(ebx, FieldOperand(ecx, i)); 231 __ mov(FieldOperand(edx, i), ebx); 232 } 233 } 234 235 // Return and remove the on-stack parameters. 236 __ ret(3 * kPointerSize); 237 238 __ bind(&slow_case); 239 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); 240 } 241 242 243 // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined). 244 void ToBooleanStub::Generate(MacroAssembler* masm) { 245 NearLabel false_result, true_result, not_string; 246 __ mov(eax, Operand(esp, 1 * kPointerSize)); 247 248 // 'null' => false. 249 Factory* factory = masm->isolate()->factory(); 250 __ cmp(eax, factory->null_value()); 251 __ j(equal, &false_result); 252 253 // Get the map and type of the heap object. 254 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); 255 __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); 256 257 // Undetectable => false. 258 __ test_b(FieldOperand(edx, Map::kBitFieldOffset), 259 1 << Map::kIsUndetectable); 260 __ j(not_zero, &false_result); 261 262 // JavaScript object => true. 263 __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE); 264 __ j(above_equal, &true_result); 265 266 // String value => false iff empty. 267 __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); 268 __ j(above_equal, ¬_string); 269 STATIC_ASSERT(kSmiTag == 0); 270 __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0)); 271 __ j(zero, &false_result); 272 __ jmp(&true_result); 273 274 __ bind(¬_string); 275 // HeapNumber => false iff +0, -0, or NaN. 276 __ cmp(edx, factory->heap_number_map()); 277 __ j(not_equal, &true_result); 278 __ fldz(); 279 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); 280 __ FCmp(); 281 __ j(zero, &false_result); 282 // Fall through to |true_result|. 283 284 // Return 1/0 for true/false in eax. 285 __ bind(&true_result); 286 __ mov(eax, 1); 287 __ ret(1 * kPointerSize); 288 __ bind(&false_result); 289 __ mov(eax, 0); 290 __ ret(1 * kPointerSize); 291 } 292 293 294 class FloatingPointHelper : public AllStatic { 295 public: 296 297 enum ArgLocation { 298 ARGS_ON_STACK, 299 ARGS_IN_REGISTERS 300 }; 301 302 // Code pattern for loading a floating point value. Input value must 303 // be either a smi or a heap number object (fp value). Requirements: 304 // operand in register number. Returns operand as floating point number 305 // on FPU stack. 306 static void LoadFloatOperand(MacroAssembler* masm, Register number); 307 308 // Code pattern for loading floating point values. Input values must 309 // be either smi or heap number objects (fp values). Requirements: 310 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. 311 // Returns operands as floating point numbers on FPU stack. 312 static void LoadFloatOperands(MacroAssembler* masm, 313 Register scratch, 314 ArgLocation arg_location = ARGS_ON_STACK); 315 316 // Similar to LoadFloatOperand but assumes that both operands are smis. 317 // Expects operands in edx, eax. 318 static void LoadFloatSmis(MacroAssembler* masm, Register scratch); 319 320 // Test if operands are smi or number objects (fp). Requirements: 321 // operand_1 in eax, operand_2 in edx; falls through on float 322 // operands, jumps to the non_float label otherwise. 323 static void CheckFloatOperands(MacroAssembler* masm, 324 Label* non_float, 325 Register scratch); 326 327 // Checks that the two floating point numbers on top of the FPU stack 328 // have int32 values. 329 static void CheckFloatOperandsAreInt32(MacroAssembler* masm, 330 Label* non_int32); 331 332 // Takes the operands in edx and eax and loads them as integers in eax 333 // and ecx. 334 static void LoadAsIntegers(MacroAssembler* masm, 335 TypeInfo type_info, 336 bool use_sse3, 337 Label* operand_conversion_failure); 338 static void LoadNumbersAsIntegers(MacroAssembler* masm, 339 TypeInfo type_info, 340 bool use_sse3, 341 Label* operand_conversion_failure); 342 static void LoadUnknownsAsIntegers(MacroAssembler* masm, 343 bool use_sse3, 344 Label* operand_conversion_failure); 345 346 // Must only be called after LoadUnknownsAsIntegers. Assumes that the 347 // operands are pushed on the stack, and that their conversions to int32 348 // are in eax and ecx. Checks that the original numbers were in the int32 349 // range. 350 static void CheckLoadedIntegersWereInt32(MacroAssembler* masm, 351 bool use_sse3, 352 Label* not_int32); 353 354 // Assumes that operands are smis or heap numbers and loads them 355 // into xmm0 and xmm1. Operands are in edx and eax. 356 // Leaves operands unchanged. 357 static void LoadSSE2Operands(MacroAssembler* masm); 358 359 // Test if operands are numbers (smi or HeapNumber objects), and load 360 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if 361 // either operand is not a number. Operands are in edx and eax. 362 // Leaves operands unchanged. 363 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); 364 365 // Similar to LoadSSE2Operands but assumes that both operands are smis. 366 // Expects operands in edx, eax. 367 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); 368 369 // Checks that the two floating point numbers loaded into xmm0 and xmm1 370 // have int32 values. 371 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm, 372 Label* non_int32, 373 Register scratch); 374 }; 375 376 377 Handle<Code> GetTypeRecordingBinaryOpStub(int key, 378 TRBinaryOpIC::TypeInfo type_info, 379 TRBinaryOpIC::TypeInfo result_type_info) { 380 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); 381 return stub.GetCode(); 382 } 383 384 385 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { 386 __ pop(ecx); // Save return address. 387 __ push(edx); 388 __ push(eax); 389 // Left and right arguments are now on top. 390 // Push this stub's key. Although the operation and the type info are 391 // encoded into the key, the encoding is opaque, so push them too. 392 __ push(Immediate(Smi::FromInt(MinorKey()))); 393 __ push(Immediate(Smi::FromInt(op_))); 394 __ push(Immediate(Smi::FromInt(operands_type_))); 395 396 __ push(ecx); // Push return address. 397 398 // Patch the caller to an appropriate specialized stub and return the 399 // operation result to the caller of the stub. 400 __ TailCallExternalReference( 401 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch), 402 masm->isolate()), 403 5, 404 1); 405 } 406 407 408 // Prepare for a type transition runtime call when the args are already on 409 // the stack, under the return address. 410 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( 411 MacroAssembler* masm) { 412 __ pop(ecx); // Save return address. 413 // Left and right arguments are already on top of the stack. 414 // Push this stub's key. Although the operation and the type info are 415 // encoded into the key, the encoding is opaque, so push them too. 416 __ push(Immediate(Smi::FromInt(MinorKey()))); 417 __ push(Immediate(Smi::FromInt(op_))); 418 __ push(Immediate(Smi::FromInt(operands_type_))); 419 420 __ push(ecx); // Push return address. 421 422 // Patch the caller to an appropriate specialized stub and return the 423 // operation result to the caller of the stub. 424 __ TailCallExternalReference( 425 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch), 426 masm->isolate()), 427 5, 428 1); 429 } 430 431 432 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { 433 switch (operands_type_) { 434 case TRBinaryOpIC::UNINITIALIZED: 435 GenerateTypeTransition(masm); 436 break; 437 case TRBinaryOpIC::SMI: 438 GenerateSmiStub(masm); 439 break; 440 case TRBinaryOpIC::INT32: 441 GenerateInt32Stub(masm); 442 break; 443 case TRBinaryOpIC::HEAP_NUMBER: 444 GenerateHeapNumberStub(masm); 445 break; 446 case TRBinaryOpIC::ODDBALL: 447 GenerateOddballStub(masm); 448 break; 449 case TRBinaryOpIC::STRING: 450 GenerateStringStub(masm); 451 break; 452 case TRBinaryOpIC::GENERIC: 453 GenerateGeneric(masm); 454 break; 455 default: 456 UNREACHABLE(); 457 } 458 } 459 460 461 const char* TypeRecordingBinaryOpStub::GetName() { 462 if (name_ != NULL) return name_; 463 const int kMaxNameLength = 100; 464 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( 465 kMaxNameLength); 466 if (name_ == NULL) return "OOM"; 467 const char* op_name = Token::Name(op_); 468 const char* overwrite_name; 469 switch (mode_) { 470 case NO_OVERWRITE: overwrite_name = "Alloc"; break; 471 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; 472 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; 473 default: overwrite_name = "UnknownOverwrite"; break; 474 } 475 476 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), 477 "TypeRecordingBinaryOpStub_%s_%s_%s", 478 op_name, 479 overwrite_name, 480 TRBinaryOpIC::GetName(operands_type_)); 481 return name_; 482 } 483 484 485 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, 486 Label* slow, 487 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { 488 // 1. Move arguments into edx, eax except for DIV and MOD, which need the 489 // dividend in eax and edx free for the division. Use eax, ebx for those. 490 Comment load_comment(masm, "-- Load arguments"); 491 Register left = edx; 492 Register right = eax; 493 if (op_ == Token::DIV || op_ == Token::MOD) { 494 left = eax; 495 right = ebx; 496 __ mov(ebx, eax); 497 __ mov(eax, edx); 498 } 499 500 501 // 2. Prepare the smi check of both operands by oring them together. 502 Comment smi_check_comment(masm, "-- Smi check arguments"); 503 Label not_smis; 504 Register combined = ecx; 505 ASSERT(!left.is(combined) && !right.is(combined)); 506 switch (op_) { 507 case Token::BIT_OR: 508 // Perform the operation into eax and smi check the result. Preserve 509 // eax in case the result is not a smi. 510 ASSERT(!left.is(ecx) && !right.is(ecx)); 511 __ mov(ecx, right); 512 __ or_(right, Operand(left)); // Bitwise or is commutative. 513 combined = right; 514 break; 515 516 case Token::BIT_XOR: 517 case Token::BIT_AND: 518 case Token::ADD: 519 case Token::SUB: 520 case Token::MUL: 521 case Token::DIV: 522 case Token::MOD: 523 __ mov(combined, right); 524 __ or_(combined, Operand(left)); 525 break; 526 527 case Token::SHL: 528 case Token::SAR: 529 case Token::SHR: 530 // Move the right operand into ecx for the shift operation, use eax 531 // for the smi check register. 532 ASSERT(!left.is(ecx) && !right.is(ecx)); 533 __ mov(ecx, right); 534 __ or_(right, Operand(left)); 535 combined = right; 536 break; 537 538 default: 539 break; 540 } 541 542 // 3. Perform the smi check of the operands. 543 STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. 544 __ test(combined, Immediate(kSmiTagMask)); 545 __ j(not_zero, ¬_smis, not_taken); 546 547 // 4. Operands are both smis, perform the operation leaving the result in 548 // eax and check the result if necessary. 549 Comment perform_smi(masm, "-- Perform smi operation"); 550 Label use_fp_on_smis; 551 switch (op_) { 552 case Token::BIT_OR: 553 // Nothing to do. 554 break; 555 556 case Token::BIT_XOR: 557 ASSERT(right.is(eax)); 558 __ xor_(right, Operand(left)); // Bitwise xor is commutative. 559 break; 560 561 case Token::BIT_AND: 562 ASSERT(right.is(eax)); 563 __ and_(right, Operand(left)); // Bitwise and is commutative. 564 break; 565 566 case Token::SHL: 567 // Remove tags from operands (but keep sign). 568 __ SmiUntag(left); 569 __ SmiUntag(ecx); 570 // Perform the operation. 571 __ shl_cl(left); 572 // Check that the *signed* result fits in a smi. 573 __ cmp(left, 0xc0000000); 574 __ j(sign, &use_fp_on_smis, not_taken); 575 // Tag the result and store it in register eax. 576 __ SmiTag(left); 577 __ mov(eax, left); 578 break; 579 580 case Token::SAR: 581 // Remove tags from operands (but keep sign). 582 __ SmiUntag(left); 583 __ SmiUntag(ecx); 584 // Perform the operation. 585 __ sar_cl(left); 586 // Tag the result and store it in register eax. 587 __ SmiTag(left); 588 __ mov(eax, left); 589 break; 590 591 case Token::SHR: 592 // Remove tags from operands (but keep sign). 593 __ SmiUntag(left); 594 __ SmiUntag(ecx); 595 // Perform the operation. 596 __ shr_cl(left); 597 // Check that the *unsigned* result fits in a smi. 598 // Neither of the two high-order bits can be set: 599 // - 0x80000000: high bit would be lost when smi tagging. 600 // - 0x40000000: this number would convert to negative when 601 // Smi tagging these two cases can only happen with shifts 602 // by 0 or 1 when handed a valid smi. 603 __ test(left, Immediate(0xc0000000)); 604 __ j(not_zero, slow, not_taken); 605 // Tag the result and store it in register eax. 606 __ SmiTag(left); 607 __ mov(eax, left); 608 break; 609 610 case Token::ADD: 611 ASSERT(right.is(eax)); 612 __ add(right, Operand(left)); // Addition is commutative. 613 __ j(overflow, &use_fp_on_smis, not_taken); 614 break; 615 616 case Token::SUB: 617 __ sub(left, Operand(right)); 618 __ j(overflow, &use_fp_on_smis, not_taken); 619 __ mov(eax, left); 620 break; 621 622 case Token::MUL: 623 // If the smi tag is 0 we can just leave the tag on one operand. 624 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. 625 // We can't revert the multiplication if the result is not a smi 626 // so save the right operand. 627 __ mov(ebx, right); 628 // Remove tag from one of the operands (but keep sign). 629 __ SmiUntag(right); 630 // Do multiplication. 631 __ imul(right, Operand(left)); // Multiplication is commutative. 632 __ j(overflow, &use_fp_on_smis, not_taken); 633 // Check for negative zero result. Use combined = left | right. 634 __ NegativeZeroTest(right, combined, &use_fp_on_smis); 635 break; 636 637 case Token::DIV: 638 // We can't revert the division if the result is not a smi so 639 // save the left operand. 640 __ mov(edi, left); 641 // Check for 0 divisor. 642 __ test(right, Operand(right)); 643 __ j(zero, &use_fp_on_smis, not_taken); 644 // Sign extend left into edx:eax. 645 ASSERT(left.is(eax)); 646 __ cdq(); 647 // Divide edx:eax by right. 648 __ idiv(right); 649 // Check for the corner case of dividing the most negative smi by 650 // -1. We cannot use the overflow flag, since it is not set by idiv 651 // instruction. 652 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 653 __ cmp(eax, 0x40000000); 654 __ j(equal, &use_fp_on_smis); 655 // Check for negative zero result. Use combined = left | right. 656 __ NegativeZeroTest(eax, combined, &use_fp_on_smis); 657 // Check that the remainder is zero. 658 __ test(edx, Operand(edx)); 659 __ j(not_zero, &use_fp_on_smis); 660 // Tag the result and store it in register eax. 661 __ SmiTag(eax); 662 break; 663 664 case Token::MOD: 665 // Check for 0 divisor. 666 __ test(right, Operand(right)); 667 __ j(zero, ¬_smis, not_taken); 668 669 // Sign extend left into edx:eax. 670 ASSERT(left.is(eax)); 671 __ cdq(); 672 // Divide edx:eax by right. 673 __ idiv(right); 674 // Check for negative zero result. Use combined = left | right. 675 __ NegativeZeroTest(edx, combined, slow); 676 // Move remainder to register eax. 677 __ mov(eax, edx); 678 break; 679 680 default: 681 UNREACHABLE(); 682 } 683 684 // 5. Emit return of result in eax. Some operations have registers pushed. 685 switch (op_) { 686 case Token::ADD: 687 case Token::SUB: 688 case Token::MUL: 689 case Token::DIV: 690 __ ret(0); 691 break; 692 case Token::MOD: 693 case Token::BIT_OR: 694 case Token::BIT_AND: 695 case Token::BIT_XOR: 696 case Token::SAR: 697 case Token::SHL: 698 case Token::SHR: 699 __ ret(2 * kPointerSize); 700 break; 701 default: 702 UNREACHABLE(); 703 } 704 705 // 6. For some operations emit inline code to perform floating point 706 // operations on known smis (e.g., if the result of the operation 707 // overflowed the smi range). 708 if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) { 709 __ bind(&use_fp_on_smis); 710 switch (op_) { 711 // Undo the effects of some operations, and some register moves. 712 case Token::SHL: 713 // The arguments are saved on the stack, and only used from there. 714 break; 715 case Token::ADD: 716 // Revert right = right + left. 717 __ sub(right, Operand(left)); 718 break; 719 case Token::SUB: 720 // Revert left = left - right. 721 __ add(left, Operand(right)); 722 break; 723 case Token::MUL: 724 // Right was clobbered but a copy is in ebx. 725 __ mov(right, ebx); 726 break; 727 case Token::DIV: 728 // Left was clobbered but a copy is in edi. Right is in ebx for 729 // division. They should be in eax, ebx for jump to not_smi. 730 __ mov(eax, edi); 731 break; 732 default: 733 // No other operators jump to use_fp_on_smis. 734 break; 735 } 736 __ jmp(¬_smis); 737 } else { 738 ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS); 739 switch (op_) { 740 case Token::SHL: { 741 Comment perform_float(masm, "-- Perform float operation on smis"); 742 __ bind(&use_fp_on_smis); 743 // Result we want is in left == edx, so we can put the allocated heap 744 // number in eax. 745 __ AllocateHeapNumber(eax, ecx, ebx, slow); 746 // Store the result in the HeapNumber and return. 747 if (CpuFeatures::IsSupported(SSE2)) { 748 CpuFeatures::Scope use_sse2(SSE2); 749 __ cvtsi2sd(xmm0, Operand(left)); 750 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 751 } else { 752 // It's OK to overwrite the right argument on the stack because we 753 // are about to return. 754 __ mov(Operand(esp, 1 * kPointerSize), left); 755 __ fild_s(Operand(esp, 1 * kPointerSize)); 756 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 757 } 758 __ ret(2 * kPointerSize); 759 break; 760 } 761 762 case Token::ADD: 763 case Token::SUB: 764 case Token::MUL: 765 case Token::DIV: { 766 Comment perform_float(masm, "-- Perform float operation on smis"); 767 __ bind(&use_fp_on_smis); 768 // Restore arguments to edx, eax. 769 switch (op_) { 770 case Token::ADD: 771 // Revert right = right + left. 772 __ sub(right, Operand(left)); 773 break; 774 case Token::SUB: 775 // Revert left = left - right. 776 __ add(left, Operand(right)); 777 break; 778 case Token::MUL: 779 // Right was clobbered but a copy is in ebx. 780 __ mov(right, ebx); 781 break; 782 case Token::DIV: 783 // Left was clobbered but a copy is in edi. Right is in ebx for 784 // division. 785 __ mov(edx, edi); 786 __ mov(eax, right); 787 break; 788 default: UNREACHABLE(); 789 break; 790 } 791 __ AllocateHeapNumber(ecx, ebx, no_reg, slow); 792 if (CpuFeatures::IsSupported(SSE2)) { 793 CpuFeatures::Scope use_sse2(SSE2); 794 FloatingPointHelper::LoadSSE2Smis(masm, ebx); 795 switch (op_) { 796 case Token::ADD: __ addsd(xmm0, xmm1); break; 797 case Token::SUB: __ subsd(xmm0, xmm1); break; 798 case Token::MUL: __ mulsd(xmm0, xmm1); break; 799 case Token::DIV: __ divsd(xmm0, xmm1); break; 800 default: UNREACHABLE(); 801 } 802 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); 803 } else { // SSE2 not available, use FPU. 804 FloatingPointHelper::LoadFloatSmis(masm, ebx); 805 switch (op_) { 806 case Token::ADD: __ faddp(1); break; 807 case Token::SUB: __ fsubp(1); break; 808 case Token::MUL: __ fmulp(1); break; 809 case Token::DIV: __ fdivp(1); break; 810 default: UNREACHABLE(); 811 } 812 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); 813 } 814 __ mov(eax, ecx); 815 __ ret(0); 816 break; 817 } 818 819 default: 820 break; 821 } 822 } 823 824 // 7. Non-smi operands, fall out to the non-smi code with the operands in 825 // edx and eax. 826 Comment done_comment(masm, "-- Enter non-smi code"); 827 __ bind(¬_smis); 828 switch (op_) { 829 case Token::BIT_OR: 830 case Token::SHL: 831 case Token::SAR: 832 case Token::SHR: 833 // Right operand is saved in ecx and eax was destroyed by the smi 834 // check. 835 __ mov(eax, ecx); 836 break; 837 838 case Token::DIV: 839 case Token::MOD: 840 // Operands are in eax, ebx at this point. 841 __ mov(edx, eax); 842 __ mov(eax, ebx); 843 break; 844 845 default: 846 break; 847 } 848 } 849 850 851 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { 852 Label call_runtime; 853 854 switch (op_) { 855 case Token::ADD: 856 case Token::SUB: 857 case Token::MUL: 858 case Token::DIV: 859 break; 860 case Token::MOD: 861 case Token::BIT_OR: 862 case Token::BIT_AND: 863 case Token::BIT_XOR: 864 case Token::SAR: 865 case Token::SHL: 866 case Token::SHR: 867 GenerateRegisterArgsPush(masm); 868 break; 869 default: 870 UNREACHABLE(); 871 } 872 873 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || 874 result_type_ == TRBinaryOpIC::SMI) { 875 GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS); 876 } else { 877 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 878 } 879 __ bind(&call_runtime); 880 switch (op_) { 881 case Token::ADD: 882 case Token::SUB: 883 case Token::MUL: 884 case Token::DIV: 885 GenerateTypeTransition(masm); 886 break; 887 case Token::MOD: 888 case Token::BIT_OR: 889 case Token::BIT_AND: 890 case Token::BIT_XOR: 891 case Token::SAR: 892 case Token::SHL: 893 case Token::SHR: 894 GenerateTypeTransitionWithSavedArgs(masm); 895 break; 896 default: 897 UNREACHABLE(); 898 } 899 } 900 901 902 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { 903 ASSERT(operands_type_ == TRBinaryOpIC::STRING); 904 ASSERT(op_ == Token::ADD); 905 // Try to add arguments as strings, otherwise, transition to the generic 906 // TRBinaryOpIC type. 907 GenerateAddStrings(masm); 908 GenerateTypeTransition(masm); 909 } 910 911 912 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { 913 Label call_runtime; 914 ASSERT(operands_type_ == TRBinaryOpIC::INT32); 915 916 // Floating point case. 917 switch (op_) { 918 case Token::ADD: 919 case Token::SUB: 920 case Token::MUL: 921 case Token::DIV: { 922 Label not_floats; 923 Label not_int32; 924 if (CpuFeatures::IsSupported(SSE2)) { 925 CpuFeatures::Scope use_sse2(SSE2); 926 FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); 927 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, ¬_int32, ecx); 928 switch (op_) { 929 case Token::ADD: __ addsd(xmm0, xmm1); break; 930 case Token::SUB: __ subsd(xmm0, xmm1); break; 931 case Token::MUL: __ mulsd(xmm0, xmm1); break; 932 case Token::DIV: __ divsd(xmm0, xmm1); break; 933 default: UNREACHABLE(); 934 } 935 // Check result type if it is currently Int32. 936 if (result_type_ <= TRBinaryOpIC::INT32) { 937 __ cvttsd2si(ecx, Operand(xmm0)); 938 __ cvtsi2sd(xmm2, Operand(ecx)); 939 __ ucomisd(xmm0, xmm2); 940 __ j(not_zero, ¬_int32); 941 __ j(carry, ¬_int32); 942 } 943 GenerateHeapResultAllocation(masm, &call_runtime); 944 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 945 __ ret(0); 946 } else { // SSE2 not available, use FPU. 947 FloatingPointHelper::CheckFloatOperands(masm, ¬_floats, ebx); 948 FloatingPointHelper::LoadFloatOperands( 949 masm, 950 ecx, 951 FloatingPointHelper::ARGS_IN_REGISTERS); 952 FloatingPointHelper::CheckFloatOperandsAreInt32(masm, ¬_int32); 953 switch (op_) { 954 case Token::ADD: __ faddp(1); break; 955 case Token::SUB: __ fsubp(1); break; 956 case Token::MUL: __ fmulp(1); break; 957 case Token::DIV: __ fdivp(1); break; 958 default: UNREACHABLE(); 959 } 960 Label after_alloc_failure; 961 GenerateHeapResultAllocation(masm, &after_alloc_failure); 962 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 963 __ ret(0); 964 __ bind(&after_alloc_failure); 965 __ ffree(); 966 __ jmp(&call_runtime); 967 } 968 969 __ bind(¬_floats); 970 __ bind(¬_int32); 971 GenerateTypeTransition(masm); 972 break; 973 } 974 975 case Token::MOD: { 976 // For MOD we go directly to runtime in the non-smi case. 977 break; 978 } 979 case Token::BIT_OR: 980 case Token::BIT_AND: 981 case Token::BIT_XOR: 982 case Token::SAR: 983 case Token::SHL: 984 case Token::SHR: { 985 GenerateRegisterArgsPush(masm); 986 Label not_floats; 987 Label not_int32; 988 Label non_smi_result; 989 /* { 990 CpuFeatures::Scope use_sse2(SSE2); 991 FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); 992 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, ¬_int32, ecx); 993 }*/ 994 FloatingPointHelper::LoadUnknownsAsIntegers(masm, 995 use_sse3_, 996 ¬_floats); 997 FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_, 998 ¬_int32); 999 switch (op_) { 1000 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; 1001 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; 1002 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; 1003 case Token::SAR: __ sar_cl(eax); break; 1004 case Token::SHL: __ shl_cl(eax); break; 1005 case Token::SHR: __ shr_cl(eax); break; 1006 default: UNREACHABLE(); 1007 } 1008 if (op_ == Token::SHR) { 1009 // Check if result is non-negative and fits in a smi. 1010 __ test(eax, Immediate(0xc0000000)); 1011 __ j(not_zero, &call_runtime); 1012 } else { 1013 // Check if result fits in a smi. 1014 __ cmp(eax, 0xc0000000); 1015 __ j(negative, &non_smi_result); 1016 } 1017 // Tag smi result and return. 1018 __ SmiTag(eax); 1019 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. 1020 1021 // All ops except SHR return a signed int32 that we load in 1022 // a HeapNumber. 1023 if (op_ != Token::SHR) { 1024 __ bind(&non_smi_result); 1025 // Allocate a heap number if needed. 1026 __ mov(ebx, Operand(eax)); // ebx: result 1027 NearLabel skip_allocation; 1028 switch (mode_) { 1029 case OVERWRITE_LEFT: 1030 case OVERWRITE_RIGHT: 1031 // If the operand was an object, we skip the 1032 // allocation of a heap number. 1033 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? 1034 1 * kPointerSize : 2 * kPointerSize)); 1035 __ test(eax, Immediate(kSmiTagMask)); 1036 __ j(not_zero, &skip_allocation, not_taken); 1037 // Fall through! 1038 case NO_OVERWRITE: 1039 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1040 __ bind(&skip_allocation); 1041 break; 1042 default: UNREACHABLE(); 1043 } 1044 // Store the result in the HeapNumber and return. 1045 if (CpuFeatures::IsSupported(SSE2)) { 1046 CpuFeatures::Scope use_sse2(SSE2); 1047 __ cvtsi2sd(xmm0, Operand(ebx)); 1048 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1049 } else { 1050 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1051 __ fild_s(Operand(esp, 1 * kPointerSize)); 1052 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1053 } 1054 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. 1055 } 1056 1057 __ bind(¬_floats); 1058 __ bind(¬_int32); 1059 GenerateTypeTransitionWithSavedArgs(masm); 1060 break; 1061 } 1062 default: UNREACHABLE(); break; 1063 } 1064 1065 // If an allocation fails, or SHR or MOD hit a hard case, 1066 // use the runtime system to get the correct result. 1067 __ bind(&call_runtime); 1068 1069 switch (op_) { 1070 case Token::ADD: 1071 GenerateRegisterArgsPush(masm); 1072 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); 1073 break; 1074 case Token::SUB: 1075 GenerateRegisterArgsPush(masm); 1076 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); 1077 break; 1078 case Token::MUL: 1079 GenerateRegisterArgsPush(masm); 1080 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); 1081 break; 1082 case Token::DIV: 1083 GenerateRegisterArgsPush(masm); 1084 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); 1085 break; 1086 case Token::MOD: 1087 GenerateRegisterArgsPush(masm); 1088 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); 1089 break; 1090 case Token::BIT_OR: 1091 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); 1092 break; 1093 case Token::BIT_AND: 1094 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); 1095 break; 1096 case Token::BIT_XOR: 1097 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); 1098 break; 1099 case Token::SAR: 1100 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); 1101 break; 1102 case Token::SHL: 1103 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); 1104 break; 1105 case Token::SHR: 1106 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); 1107 break; 1108 default: 1109 UNREACHABLE(); 1110 } 1111 } 1112 1113 1114 void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { 1115 Label call_runtime; 1116 1117 if (op_ == Token::ADD) { 1118 // Handle string addition here, because it is the only operation 1119 // that does not do a ToNumber conversion on the operands. 1120 GenerateAddStrings(masm); 1121 } 1122 1123 // Convert odd ball arguments to numbers. 1124 NearLabel check, done; 1125 __ cmp(edx, FACTORY->undefined_value()); 1126 __ j(not_equal, &check); 1127 if (Token::IsBitOp(op_)) { 1128 __ xor_(edx, Operand(edx)); 1129 } else { 1130 __ mov(edx, Immediate(FACTORY->nan_value())); 1131 } 1132 __ jmp(&done); 1133 __ bind(&check); 1134 __ cmp(eax, FACTORY->undefined_value()); 1135 __ j(not_equal, &done); 1136 if (Token::IsBitOp(op_)) { 1137 __ xor_(eax, Operand(eax)); 1138 } else { 1139 __ mov(eax, Immediate(FACTORY->nan_value())); 1140 } 1141 __ bind(&done); 1142 1143 GenerateHeapNumberStub(masm); 1144 } 1145 1146 1147 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { 1148 Label call_runtime; 1149 1150 // Floating point case. 1151 switch (op_) { 1152 case Token::ADD: 1153 case Token::SUB: 1154 case Token::MUL: 1155 case Token::DIV: { 1156 Label not_floats; 1157 if (CpuFeatures::IsSupported(SSE2)) { 1158 CpuFeatures::Scope use_sse2(SSE2); 1159 FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); 1160 1161 switch (op_) { 1162 case Token::ADD: __ addsd(xmm0, xmm1); break; 1163 case Token::SUB: __ subsd(xmm0, xmm1); break; 1164 case Token::MUL: __ mulsd(xmm0, xmm1); break; 1165 case Token::DIV: __ divsd(xmm0, xmm1); break; 1166 default: UNREACHABLE(); 1167 } 1168 GenerateHeapResultAllocation(masm, &call_runtime); 1169 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1170 __ ret(0); 1171 } else { // SSE2 not available, use FPU. 1172 FloatingPointHelper::CheckFloatOperands(masm, ¬_floats, ebx); 1173 FloatingPointHelper::LoadFloatOperands( 1174 masm, 1175 ecx, 1176 FloatingPointHelper::ARGS_IN_REGISTERS); 1177 switch (op_) { 1178 case Token::ADD: __ faddp(1); break; 1179 case Token::SUB: __ fsubp(1); break; 1180 case Token::MUL: __ fmulp(1); break; 1181 case Token::DIV: __ fdivp(1); break; 1182 default: UNREACHABLE(); 1183 } 1184 Label after_alloc_failure; 1185 GenerateHeapResultAllocation(masm, &after_alloc_failure); 1186 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1187 __ ret(0); 1188 __ bind(&after_alloc_failure); 1189 __ ffree(); 1190 __ jmp(&call_runtime); 1191 } 1192 1193 __ bind(¬_floats); 1194 GenerateTypeTransition(masm); 1195 break; 1196 } 1197 1198 case Token::MOD: { 1199 // For MOD we go directly to runtime in the non-smi case. 1200 break; 1201 } 1202 case Token::BIT_OR: 1203 case Token::BIT_AND: 1204 case Token::BIT_XOR: 1205 case Token::SAR: 1206 case Token::SHL: 1207 case Token::SHR: { 1208 GenerateRegisterArgsPush(masm); 1209 Label not_floats; 1210 Label non_smi_result; 1211 FloatingPointHelper::LoadUnknownsAsIntegers(masm, 1212 use_sse3_, 1213 ¬_floats); 1214 switch (op_) { 1215 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; 1216 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; 1217 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; 1218 case Token::SAR: __ sar_cl(eax); break; 1219 case Token::SHL: __ shl_cl(eax); break; 1220 case Token::SHR: __ shr_cl(eax); break; 1221 default: UNREACHABLE(); 1222 } 1223 if (op_ == Token::SHR) { 1224 // Check if result is non-negative and fits in a smi. 1225 __ test(eax, Immediate(0xc0000000)); 1226 __ j(not_zero, &call_runtime); 1227 } else { 1228 // Check if result fits in a smi. 1229 __ cmp(eax, 0xc0000000); 1230 __ j(negative, &non_smi_result); 1231 } 1232 // Tag smi result and return. 1233 __ SmiTag(eax); 1234 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. 1235 1236 // All ops except SHR return a signed int32 that we load in 1237 // a HeapNumber. 1238 if (op_ != Token::SHR) { 1239 __ bind(&non_smi_result); 1240 // Allocate a heap number if needed. 1241 __ mov(ebx, Operand(eax)); // ebx: result 1242 NearLabel skip_allocation; 1243 switch (mode_) { 1244 case OVERWRITE_LEFT: 1245 case OVERWRITE_RIGHT: 1246 // If the operand was an object, we skip the 1247 // allocation of a heap number. 1248 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? 1249 1 * kPointerSize : 2 * kPointerSize)); 1250 __ test(eax, Immediate(kSmiTagMask)); 1251 __ j(not_zero, &skip_allocation, not_taken); 1252 // Fall through! 1253 case NO_OVERWRITE: 1254 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1255 __ bind(&skip_allocation); 1256 break; 1257 default: UNREACHABLE(); 1258 } 1259 // Store the result in the HeapNumber and return. 1260 if (CpuFeatures::IsSupported(SSE2)) { 1261 CpuFeatures::Scope use_sse2(SSE2); 1262 __ cvtsi2sd(xmm0, Operand(ebx)); 1263 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1264 } else { 1265 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1266 __ fild_s(Operand(esp, 1 * kPointerSize)); 1267 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1268 } 1269 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. 1270 } 1271 1272 __ bind(¬_floats); 1273 GenerateTypeTransitionWithSavedArgs(masm); 1274 break; 1275 } 1276 default: UNREACHABLE(); break; 1277 } 1278 1279 // If an allocation fails, or SHR or MOD hit a hard case, 1280 // use the runtime system to get the correct result. 1281 __ bind(&call_runtime); 1282 1283 switch (op_) { 1284 case Token::ADD: 1285 GenerateRegisterArgsPush(masm); 1286 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); 1287 break; 1288 case Token::SUB: 1289 GenerateRegisterArgsPush(masm); 1290 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); 1291 break; 1292 case Token::MUL: 1293 GenerateRegisterArgsPush(masm); 1294 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); 1295 break; 1296 case Token::DIV: 1297 GenerateRegisterArgsPush(masm); 1298 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); 1299 break; 1300 case Token::MOD: 1301 GenerateRegisterArgsPush(masm); 1302 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); 1303 break; 1304 case Token::BIT_OR: 1305 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); 1306 break; 1307 case Token::BIT_AND: 1308 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); 1309 break; 1310 case Token::BIT_XOR: 1311 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); 1312 break; 1313 case Token::SAR: 1314 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); 1315 break; 1316 case Token::SHL: 1317 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); 1318 break; 1319 case Token::SHR: 1320 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); 1321 break; 1322 default: 1323 UNREACHABLE(); 1324 } 1325 } 1326 1327 1328 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 1329 Label call_runtime; 1330 1331 Counters* counters = masm->isolate()->counters(); 1332 __ IncrementCounter(counters->generic_binary_stub_calls(), 1); 1333 1334 switch (op_) { 1335 case Token::ADD: 1336 case Token::SUB: 1337 case Token::MUL: 1338 case Token::DIV: 1339 break; 1340 case Token::MOD: 1341 case Token::BIT_OR: 1342 case Token::BIT_AND: 1343 case Token::BIT_XOR: 1344 case Token::SAR: 1345 case Token::SHL: 1346 case Token::SHR: 1347 GenerateRegisterArgsPush(masm); 1348 break; 1349 default: 1350 UNREACHABLE(); 1351 } 1352 1353 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 1354 1355 // Floating point case. 1356 switch (op_) { 1357 case Token::ADD: 1358 case Token::SUB: 1359 case Token::MUL: 1360 case Token::DIV: { 1361 Label not_floats; 1362 if (CpuFeatures::IsSupported(SSE2)) { 1363 CpuFeatures::Scope use_sse2(SSE2); 1364 FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); 1365 1366 switch (op_) { 1367 case Token::ADD: __ addsd(xmm0, xmm1); break; 1368 case Token::SUB: __ subsd(xmm0, xmm1); break; 1369 case Token::MUL: __ mulsd(xmm0, xmm1); break; 1370 case Token::DIV: __ divsd(xmm0, xmm1); break; 1371 default: UNREACHABLE(); 1372 } 1373 GenerateHeapResultAllocation(masm, &call_runtime); 1374 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1375 __ ret(0); 1376 } else { // SSE2 not available, use FPU. 1377 FloatingPointHelper::CheckFloatOperands(masm, ¬_floats, ebx); 1378 FloatingPointHelper::LoadFloatOperands( 1379 masm, 1380 ecx, 1381 FloatingPointHelper::ARGS_IN_REGISTERS); 1382 switch (op_) { 1383 case Token::ADD: __ faddp(1); break; 1384 case Token::SUB: __ fsubp(1); break; 1385 case Token::MUL: __ fmulp(1); break; 1386 case Token::DIV: __ fdivp(1); break; 1387 default: UNREACHABLE(); 1388 } 1389 Label after_alloc_failure; 1390 GenerateHeapResultAllocation(masm, &after_alloc_failure); 1391 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1392 __ ret(0); 1393 __ bind(&after_alloc_failure); 1394 __ ffree(); 1395 __ jmp(&call_runtime); 1396 } 1397 __ bind(¬_floats); 1398 break; 1399 } 1400 case Token::MOD: { 1401 // For MOD we go directly to runtime in the non-smi case. 1402 break; 1403 } 1404 case Token::BIT_OR: 1405 case Token::BIT_AND: 1406 case Token::BIT_XOR: 1407 case Token::SAR: 1408 case Token::SHL: 1409 case Token::SHR: { 1410 Label non_smi_result; 1411 FloatingPointHelper::LoadUnknownsAsIntegers(masm, 1412 use_sse3_, 1413 &call_runtime); 1414 switch (op_) { 1415 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; 1416 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; 1417 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; 1418 case Token::SAR: __ sar_cl(eax); break; 1419 case Token::SHL: __ shl_cl(eax); break; 1420 case Token::SHR: __ shr_cl(eax); break; 1421 default: UNREACHABLE(); 1422 } 1423 if (op_ == Token::SHR) { 1424 // Check if result is non-negative and fits in a smi. 1425 __ test(eax, Immediate(0xc0000000)); 1426 __ j(not_zero, &call_runtime); 1427 } else { 1428 // Check if result fits in a smi. 1429 __ cmp(eax, 0xc0000000); 1430 __ j(negative, &non_smi_result); 1431 } 1432 // Tag smi result and return. 1433 __ SmiTag(eax); 1434 __ ret(2 * kPointerSize); // Drop the arguments from the stack. 1435 1436 // All ops except SHR return a signed int32 that we load in 1437 // a HeapNumber. 1438 if (op_ != Token::SHR) { 1439 __ bind(&non_smi_result); 1440 // Allocate a heap number if needed. 1441 __ mov(ebx, Operand(eax)); // ebx: result 1442 NearLabel skip_allocation; 1443 switch (mode_) { 1444 case OVERWRITE_LEFT: 1445 case OVERWRITE_RIGHT: 1446 // If the operand was an object, we skip the 1447 // allocation of a heap number. 1448 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? 1449 1 * kPointerSize : 2 * kPointerSize)); 1450 __ test(eax, Immediate(kSmiTagMask)); 1451 __ j(not_zero, &skip_allocation, not_taken); 1452 // Fall through! 1453 case NO_OVERWRITE: 1454 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1455 __ bind(&skip_allocation); 1456 break; 1457 default: UNREACHABLE(); 1458 } 1459 // Store the result in the HeapNumber and return. 1460 if (CpuFeatures::IsSupported(SSE2)) { 1461 CpuFeatures::Scope use_sse2(SSE2); 1462 __ cvtsi2sd(xmm0, Operand(ebx)); 1463 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1464 } else { 1465 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1466 __ fild_s(Operand(esp, 1 * kPointerSize)); 1467 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1468 } 1469 __ ret(2 * kPointerSize); 1470 } 1471 break; 1472 } 1473 default: UNREACHABLE(); break; 1474 } 1475 1476 // If all else fails, use the runtime system to get the correct 1477 // result. 1478 __ bind(&call_runtime); 1479 switch (op_) { 1480 case Token::ADD: { 1481 GenerateAddStrings(masm); 1482 GenerateRegisterArgsPush(masm); 1483 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); 1484 break; 1485 } 1486 case Token::SUB: 1487 GenerateRegisterArgsPush(masm); 1488 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); 1489 break; 1490 case Token::MUL: 1491 GenerateRegisterArgsPush(masm); 1492 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); 1493 break; 1494 case Token::DIV: 1495 GenerateRegisterArgsPush(masm); 1496 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); 1497 break; 1498 case Token::MOD: 1499 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); 1500 break; 1501 case Token::BIT_OR: 1502 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); 1503 break; 1504 case Token::BIT_AND: 1505 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); 1506 break; 1507 case Token::BIT_XOR: 1508 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); 1509 break; 1510 case Token::SAR: 1511 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); 1512 break; 1513 case Token::SHL: 1514 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); 1515 break; 1516 case Token::SHR: 1517 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); 1518 break; 1519 default: 1520 UNREACHABLE(); 1521 } 1522 } 1523 1524 1525 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { 1526 ASSERT(op_ == Token::ADD); 1527 NearLabel left_not_string, call_runtime; 1528 1529 // Registers containing left and right operands respectively. 1530 Register left = edx; 1531 Register right = eax; 1532 1533 // Test if left operand is a string. 1534 __ test(left, Immediate(kSmiTagMask)); 1535 __ j(zero, &left_not_string); 1536 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx); 1537 __ j(above_equal, &left_not_string); 1538 1539 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); 1540 GenerateRegisterArgsPush(masm); 1541 __ TailCallStub(&string_add_left_stub); 1542 1543 // Left operand is not a string, test right. 1544 __ bind(&left_not_string); 1545 __ test(right, Immediate(kSmiTagMask)); 1546 __ j(zero, &call_runtime); 1547 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx); 1548 __ j(above_equal, &call_runtime); 1549 1550 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); 1551 GenerateRegisterArgsPush(masm); 1552 __ TailCallStub(&string_add_right_stub); 1553 1554 // Neither argument is a string. 1555 __ bind(&call_runtime); 1556 } 1557 1558 1559 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( 1560 MacroAssembler* masm, 1561 Label* alloc_failure) { 1562 Label skip_allocation; 1563 OverwriteMode mode = mode_; 1564 switch (mode) { 1565 case OVERWRITE_LEFT: { 1566 // If the argument in edx is already an object, we skip the 1567 // allocation of a heap number. 1568 __ test(edx, Immediate(kSmiTagMask)); 1569 __ j(not_zero, &skip_allocation, not_taken); 1570 // Allocate a heap number for the result. Keep eax and edx intact 1571 // for the possible runtime call. 1572 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); 1573 // Now edx can be overwritten losing one of the arguments as we are 1574 // now done and will not need it any more. 1575 __ mov(edx, Operand(ebx)); 1576 __ bind(&skip_allocation); 1577 // Use object in edx as a result holder 1578 __ mov(eax, Operand(edx)); 1579 break; 1580 } 1581 case OVERWRITE_RIGHT: 1582 // If the argument in eax is already an object, we skip the 1583 // allocation of a heap number. 1584 __ test(eax, Immediate(kSmiTagMask)); 1585 __ j(not_zero, &skip_allocation, not_taken); 1586 // Fall through! 1587 case NO_OVERWRITE: 1588 // Allocate a heap number for the result. Keep eax and edx intact 1589 // for the possible runtime call. 1590 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); 1591 // Now eax can be overwritten losing one of the arguments as we are 1592 // now done and will not need it any more. 1593 __ mov(eax, ebx); 1594 __ bind(&skip_allocation); 1595 break; 1596 default: UNREACHABLE(); 1597 } 1598 } 1599 1600 1601 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { 1602 __ pop(ecx); 1603 __ push(edx); 1604 __ push(eax); 1605 __ push(ecx); 1606 } 1607 1608 1609 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 1610 // TAGGED case: 1611 // Input: 1612 // esp[4]: tagged number input argument (should be number). 1613 // esp[0]: return address. 1614 // Output: 1615 // eax: tagged double result. 1616 // UNTAGGED case: 1617 // Input:: 1618 // esp[0]: return address. 1619 // xmm1: untagged double input argument 1620 // Output: 1621 // xmm1: untagged double result. 1622 1623 Label runtime_call; 1624 Label runtime_call_clear_stack; 1625 Label skip_cache; 1626 const bool tagged = (argument_type_ == TAGGED); 1627 if (tagged) { 1628 // Test that eax is a number. 1629 NearLabel input_not_smi; 1630 NearLabel loaded; 1631 __ mov(eax, Operand(esp, kPointerSize)); 1632 __ test(eax, Immediate(kSmiTagMask)); 1633 __ j(not_zero, &input_not_smi); 1634 // Input is a smi. Untag and load it onto the FPU stack. 1635 // Then load the low and high words of the double into ebx, edx. 1636 STATIC_ASSERT(kSmiTagSize == 1); 1637 __ sar(eax, 1); 1638 __ sub(Operand(esp), Immediate(2 * kPointerSize)); 1639 __ mov(Operand(esp, 0), eax); 1640 __ fild_s(Operand(esp, 0)); 1641 __ fst_d(Operand(esp, 0)); 1642 __ pop(edx); 1643 __ pop(ebx); 1644 __ jmp(&loaded); 1645 __ bind(&input_not_smi); 1646 // Check if input is a HeapNumber. 1647 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 1648 Factory* factory = masm->isolate()->factory(); 1649 __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); 1650 __ j(not_equal, &runtime_call); 1651 // Input is a HeapNumber. Push it on the FPU stack and load its 1652 // low and high words into ebx, edx. 1653 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1654 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); 1655 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); 1656 1657 __ bind(&loaded); 1658 } else { // UNTAGGED. 1659 if (CpuFeatures::IsSupported(SSE4_1)) { 1660 CpuFeatures::Scope sse4_scope(SSE4_1); 1661 __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx. 1662 } else { 1663 __ pshufd(xmm0, xmm1, 0x1); 1664 __ movd(Operand(edx), xmm0); 1665 } 1666 __ movd(Operand(ebx), xmm1); 1667 } 1668 1669 // ST[0] or xmm1 == double value 1670 // ebx = low 32 bits of double value 1671 // edx = high 32 bits of double value 1672 // Compute hash (the shifts are arithmetic): 1673 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); 1674 __ mov(ecx, ebx); 1675 __ xor_(ecx, Operand(edx)); 1676 __ mov(eax, ecx); 1677 __ sar(eax, 16); 1678 __ xor_(ecx, Operand(eax)); 1679 __ mov(eax, ecx); 1680 __ sar(eax, 8); 1681 __ xor_(ecx, Operand(eax)); 1682 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); 1683 __ and_(Operand(ecx), 1684 Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); 1685 1686 // ST[0] or xmm1 == double value. 1687 // ebx = low 32 bits of double value. 1688 // edx = high 32 bits of double value. 1689 // ecx = TranscendentalCache::hash(double value). 1690 ExternalReference cache_array = 1691 ExternalReference::transcendental_cache_array_address(masm->isolate()); 1692 __ mov(eax, Immediate(cache_array)); 1693 int cache_array_index = 1694 type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]); 1695 __ mov(eax, Operand(eax, cache_array_index)); 1696 // Eax points to the cache for the type type_. 1697 // If NULL, the cache hasn't been initialized yet, so go through runtime. 1698 __ test(eax, Operand(eax)); 1699 __ j(zero, &runtime_call_clear_stack); 1700 #ifdef DEBUG 1701 // Check that the layout of cache elements match expectations. 1702 { TranscendentalCache::SubCache::Element test_elem[2]; 1703 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); 1704 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); 1705 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); 1706 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); 1707 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); 1708 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. 1709 CHECK_EQ(0, elem_in0 - elem_start); 1710 CHECK_EQ(kIntSize, elem_in1 - elem_start); 1711 CHECK_EQ(2 * kIntSize, elem_out - elem_start); 1712 } 1713 #endif 1714 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. 1715 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); 1716 __ lea(ecx, Operand(eax, ecx, times_4, 0)); 1717 // Check if cache matches: Double value is stored in uint32_t[2] array. 1718 NearLabel cache_miss; 1719 __ cmp(ebx, Operand(ecx, 0)); 1720 __ j(not_equal, &cache_miss); 1721 __ cmp(edx, Operand(ecx, kIntSize)); 1722 __ j(not_equal, &cache_miss); 1723 // Cache hit! 1724 __ mov(eax, Operand(ecx, 2 * kIntSize)); 1725 if (tagged) { 1726 __ fstp(0); 1727 __ ret(kPointerSize); 1728 } else { // UNTAGGED. 1729 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 1730 __ Ret(); 1731 } 1732 1733 __ bind(&cache_miss); 1734 // Update cache with new value. 1735 // We are short on registers, so use no_reg as scratch. 1736 // This gives slightly larger code. 1737 if (tagged) { 1738 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); 1739 } else { // UNTAGGED. 1740 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); 1741 __ sub(Operand(esp), Immediate(kDoubleSize)); 1742 __ movdbl(Operand(esp, 0), xmm1); 1743 __ fld_d(Operand(esp, 0)); 1744 __ add(Operand(esp), Immediate(kDoubleSize)); 1745 } 1746 GenerateOperation(masm); 1747 __ mov(Operand(ecx, 0), ebx); 1748 __ mov(Operand(ecx, kIntSize), edx); 1749 __ mov(Operand(ecx, 2 * kIntSize), eax); 1750 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1751 if (tagged) { 1752 __ ret(kPointerSize); 1753 } else { // UNTAGGED. 1754 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 1755 __ Ret(); 1756 1757 // Skip cache and return answer directly, only in untagged case. 1758 __ bind(&skip_cache); 1759 __ sub(Operand(esp), Immediate(kDoubleSize)); 1760 __ movdbl(Operand(esp, 0), xmm1); 1761 __ fld_d(Operand(esp, 0)); 1762 GenerateOperation(masm); 1763 __ fstp_d(Operand(esp, 0)); 1764 __ movdbl(xmm1, Operand(esp, 0)); 1765 __ add(Operand(esp), Immediate(kDoubleSize)); 1766 // We return the value in xmm1 without adding it to the cache, but 1767 // we cause a scavenging GC so that future allocations will succeed. 1768 __ EnterInternalFrame(); 1769 // Allocate an unused object bigger than a HeapNumber. 1770 __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); 1771 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 1772 __ LeaveInternalFrame(); 1773 __ Ret(); 1774 } 1775 1776 // Call runtime, doing whatever allocation and cleanup is necessary. 1777 if (tagged) { 1778 __ bind(&runtime_call_clear_stack); 1779 __ fstp(0); 1780 __ bind(&runtime_call); 1781 ExternalReference runtime = 1782 ExternalReference(RuntimeFunction(), masm->isolate()); 1783 __ TailCallExternalReference(runtime, 1, 1); 1784 } else { // UNTAGGED. 1785 __ bind(&runtime_call_clear_stack); 1786 __ bind(&runtime_call); 1787 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); 1788 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); 1789 __ EnterInternalFrame(); 1790 __ push(eax); 1791 __ CallRuntime(RuntimeFunction(), 1); 1792 __ LeaveInternalFrame(); 1793 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 1794 __ Ret(); 1795 } 1796 } 1797 1798 1799 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 1800 switch (type_) { 1801 case TranscendentalCache::SIN: return Runtime::kMath_sin; 1802 case TranscendentalCache::COS: return Runtime::kMath_cos; 1803 case TranscendentalCache::LOG: return Runtime::kMath_log; 1804 default: 1805 UNIMPLEMENTED(); 1806 return Runtime::kAbort; 1807 } 1808 } 1809 1810 1811 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { 1812 // Only free register is edi. 1813 // Input value is on FP stack, and also in ebx/edx. 1814 // Input value is possibly in xmm1. 1815 // Address of result (a newly allocated HeapNumber) may be in eax. 1816 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) { 1817 // Both fsin and fcos require arguments in the range +/-2^63 and 1818 // return NaN for infinities and NaN. They can share all code except 1819 // the actual fsin/fcos operation. 1820 NearLabel in_range, done; 1821 // If argument is outside the range -2^63..2^63, fsin/cos doesn't 1822 // work. We must reduce it to the appropriate range. 1823 __ mov(edi, edx); 1824 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. 1825 int supported_exponent_limit = 1826 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; 1827 __ cmp(Operand(edi), Immediate(supported_exponent_limit)); 1828 __ j(below, &in_range, taken); 1829 // Check for infinity and NaN. Both return NaN for sin. 1830 __ cmp(Operand(edi), Immediate(0x7ff00000)); 1831 NearLabel non_nan_result; 1832 __ j(not_equal, &non_nan_result, taken); 1833 // Input is +/-Infinity or NaN. Result is NaN. 1834 __ fstp(0); 1835 // NaN is represented by 0x7ff8000000000000. 1836 __ push(Immediate(0x7ff80000)); 1837 __ push(Immediate(0)); 1838 __ fld_d(Operand(esp, 0)); 1839 __ add(Operand(esp), Immediate(2 * kPointerSize)); 1840 __ jmp(&done); 1841 1842 __ bind(&non_nan_result); 1843 1844 // Use fpmod to restrict argument to the range +/-2*PI. 1845 __ mov(edi, eax); // Save eax before using fnstsw_ax. 1846 __ fldpi(); 1847 __ fadd(0); 1848 __ fld(1); 1849 // FPU Stack: input, 2*pi, input. 1850 { 1851 NearLabel no_exceptions; 1852 __ fwait(); 1853 __ fnstsw_ax(); 1854 // Clear if Illegal Operand or Zero Division exceptions are set. 1855 __ test(Operand(eax), Immediate(5)); 1856 __ j(zero, &no_exceptions); 1857 __ fnclex(); 1858 __ bind(&no_exceptions); 1859 } 1860 1861 // Compute st(0) % st(1) 1862 { 1863 NearLabel partial_remainder_loop; 1864 __ bind(&partial_remainder_loop); 1865 __ fprem1(); 1866 __ fwait(); 1867 __ fnstsw_ax(); 1868 __ test(Operand(eax), Immediate(0x400 /* C2 */)); 1869 // If C2 is set, computation only has partial result. Loop to 1870 // continue computation. 1871 __ j(not_zero, &partial_remainder_loop); 1872 } 1873 // FPU Stack: input, 2*pi, input % 2*pi 1874 __ fstp(2); 1875 __ fstp(0); 1876 __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer). 1877 1878 // FPU Stack: input % 2*pi 1879 __ bind(&in_range); 1880 switch (type_) { 1881 case TranscendentalCache::SIN: 1882 __ fsin(); 1883 break; 1884 case TranscendentalCache::COS: 1885 __ fcos(); 1886 break; 1887 default: 1888 UNREACHABLE(); 1889 } 1890 __ bind(&done); 1891 } else { 1892 ASSERT(type_ == TranscendentalCache::LOG); 1893 __ fldln2(); 1894 __ fxch(); 1895 __ fyl2x(); 1896 } 1897 } 1898 1899 1900 // Get the integer part of a heap number. Surprisingly, all this bit twiddling 1901 // is faster than using the built-in instructions on floating point registers. 1902 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the 1903 // trashed registers. 1904 void IntegerConvert(MacroAssembler* masm, 1905 Register source, 1906 TypeInfo type_info, 1907 bool use_sse3, 1908 Label* conversion_failure) { 1909 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); 1910 Label done, right_exponent, normal_exponent; 1911 Register scratch = ebx; 1912 Register scratch2 = edi; 1913 if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) { 1914 CpuFeatures::Scope scope(SSE2); 1915 __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); 1916 return; 1917 } 1918 if (!type_info.IsInteger32() || !use_sse3) { 1919 // Get exponent word. 1920 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); 1921 // Get exponent alone in scratch2. 1922 __ mov(scratch2, scratch); 1923 __ and_(scratch2, HeapNumber::kExponentMask); 1924 } 1925 if (use_sse3) { 1926 CpuFeatures::Scope scope(SSE3); 1927 if (!type_info.IsInteger32()) { 1928 // Check whether the exponent is too big for a 64 bit signed integer. 1929 static const uint32_t kTooBigExponent = 1930 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; 1931 __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); 1932 __ j(greater_equal, conversion_failure); 1933 } 1934 // Load x87 register with heap number. 1935 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); 1936 // Reserve space for 64 bit answer. 1937 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. 1938 // Do conversion, which cannot fail because we checked the exponent. 1939 __ fisttp_d(Operand(esp, 0)); 1940 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. 1941 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. 1942 } else { 1943 // Load ecx with zero. We use this either for the final shift or 1944 // for the answer. 1945 __ xor_(ecx, Operand(ecx)); 1946 // Check whether the exponent matches a 32 bit signed int that cannot be 1947 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the 1948 // exponent is 30 (biased). This is the exponent that we are fastest at and 1949 // also the highest exponent we can handle here. 1950 const uint32_t non_smi_exponent = 1951 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; 1952 __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); 1953 // If we have a match of the int32-but-not-Smi exponent then skip some 1954 // logic. 1955 __ j(equal, &right_exponent); 1956 // If the exponent is higher than that then go to slow case. This catches 1957 // numbers that don't fit in a signed int32, infinities and NaNs. 1958 __ j(less, &normal_exponent); 1959 1960 { 1961 // Handle a big exponent. The only reason we have this code is that the 1962 // >>> operator has a tendency to generate numbers with an exponent of 31. 1963 const uint32_t big_non_smi_exponent = 1964 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; 1965 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); 1966 __ j(not_equal, conversion_failure); 1967 // We have the big exponent, typically from >>>. This means the number is 1968 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. 1969 __ mov(scratch2, scratch); 1970 __ and_(scratch2, HeapNumber::kMantissaMask); 1971 // Put back the implicit 1. 1972 __ or_(scratch2, 1 << HeapNumber::kExponentShift); 1973 // Shift up the mantissa bits to take up the space the exponent used to 1974 // take. We just orred in the implicit bit so that took care of one and 1975 // we want to use the full unsigned range so we subtract 1 bit from the 1976 // shift distance. 1977 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; 1978 __ shl(scratch2, big_shift_distance); 1979 // Get the second half of the double. 1980 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); 1981 // Shift down 21 bits to get the most significant 11 bits or the low 1982 // mantissa word. 1983 __ shr(ecx, 32 - big_shift_distance); 1984 __ or_(ecx, Operand(scratch2)); 1985 // We have the answer in ecx, but we may need to negate it. 1986 __ test(scratch, Operand(scratch)); 1987 __ j(positive, &done); 1988 __ neg(ecx); 1989 __ jmp(&done); 1990 } 1991 1992 __ bind(&normal_exponent); 1993 // Exponent word in scratch, exponent part of exponent word in scratch2. 1994 // Zero in ecx. 1995 // We know the exponent is smaller than 30 (biased). If it is less than 1996 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie 1997 // it rounds to zero. 1998 const uint32_t zero_exponent = 1999 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; 2000 __ sub(Operand(scratch2), Immediate(zero_exponent)); 2001 // ecx already has a Smi zero. 2002 __ j(less, &done); 2003 2004 // We have a shifted exponent between 0 and 30 in scratch2. 2005 __ shr(scratch2, HeapNumber::kExponentShift); 2006 __ mov(ecx, Immediate(30)); 2007 __ sub(ecx, Operand(scratch2)); 2008 2009 __ bind(&right_exponent); 2010 // Here ecx is the shift, scratch is the exponent word. 2011 // Get the top bits of the mantissa. 2012 __ and_(scratch, HeapNumber::kMantissaMask); 2013 // Put back the implicit 1. 2014 __ or_(scratch, 1 << HeapNumber::kExponentShift); 2015 // Shift up the mantissa bits to take up the space the exponent used to 2016 // take. We have kExponentShift + 1 significant bits int he low end of the 2017 // word. Shift them to the top bits. 2018 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; 2019 __ shl(scratch, shift_distance); 2020 // Get the second half of the double. For some exponents we don't 2021 // actually need this because the bits get shifted out again, but 2022 // it's probably slower to test than just to do it. 2023 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); 2024 // Shift down 22 bits to get the most significant 10 bits or the low 2025 // mantissa word. 2026 __ shr(scratch2, 32 - shift_distance); 2027 __ or_(scratch2, Operand(scratch)); 2028 // Move down according to the exponent. 2029 __ shr_cl(scratch2); 2030 // Now the unsigned answer is in scratch2. We need to move it to ecx and 2031 // we may need to fix the sign. 2032 NearLabel negative; 2033 __ xor_(ecx, Operand(ecx)); 2034 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); 2035 __ j(greater, &negative); 2036 __ mov(ecx, scratch2); 2037 __ jmp(&done); 2038 __ bind(&negative); 2039 __ sub(ecx, Operand(scratch2)); 2040 __ bind(&done); 2041 } 2042 } 2043 2044 2045 // Input: edx, eax are the left and right objects of a bit op. 2046 // Output: eax, ecx are left and right integers for a bit op. 2047 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, 2048 TypeInfo type_info, 2049 bool use_sse3, 2050 Label* conversion_failure) { 2051 // Check float operands. 2052 Label arg1_is_object, check_undefined_arg1; 2053 Label arg2_is_object, check_undefined_arg2; 2054 Label load_arg2, done; 2055 2056 if (!type_info.IsDouble()) { 2057 if (!type_info.IsSmi()) { 2058 __ test(edx, Immediate(kSmiTagMask)); 2059 __ j(not_zero, &arg1_is_object); 2060 } else { 2061 if (FLAG_debug_code) __ AbortIfNotSmi(edx); 2062 } 2063 __ SmiUntag(edx); 2064 __ jmp(&load_arg2); 2065 } 2066 2067 __ bind(&arg1_is_object); 2068 2069 // Get the untagged integer version of the edx heap number in ecx. 2070 IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure); 2071 __ mov(edx, ecx); 2072 2073 // Here edx has the untagged integer, eax has a Smi or a heap number. 2074 __ bind(&load_arg2); 2075 if (!type_info.IsDouble()) { 2076 // Test if arg2 is a Smi. 2077 if (!type_info.IsSmi()) { 2078 __ test(eax, Immediate(kSmiTagMask)); 2079 __ j(not_zero, &arg2_is_object); 2080 } else { 2081 if (FLAG_debug_code) __ AbortIfNotSmi(eax); 2082 } 2083 __ SmiUntag(eax); 2084 __ mov(ecx, eax); 2085 __ jmp(&done); 2086 } 2087 2088 __ bind(&arg2_is_object); 2089 2090 // Get the untagged integer version of the eax heap number in ecx. 2091 IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure); 2092 __ bind(&done); 2093 __ mov(eax, edx); 2094 } 2095 2096 2097 // Input: edx, eax are the left and right objects of a bit op. 2098 // Output: eax, ecx are left and right integers for a bit op. 2099 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, 2100 bool use_sse3, 2101 Label* conversion_failure) { 2102 // Check float operands. 2103 Label arg1_is_object, check_undefined_arg1; 2104 Label arg2_is_object, check_undefined_arg2; 2105 Label load_arg2, done; 2106 2107 // Test if arg1 is a Smi. 2108 __ test(edx, Immediate(kSmiTagMask)); 2109 __ j(not_zero, &arg1_is_object); 2110 2111 __ SmiUntag(edx); 2112 __ jmp(&load_arg2); 2113 2114 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 2115 __ bind(&check_undefined_arg1); 2116 Factory* factory = masm->isolate()->factory(); 2117 __ cmp(edx, factory->undefined_value()); 2118 __ j(not_equal, conversion_failure); 2119 __ mov(edx, Immediate(0)); 2120 __ jmp(&load_arg2); 2121 2122 __ bind(&arg1_is_object); 2123 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); 2124 __ cmp(ebx, factory->heap_number_map()); 2125 __ j(not_equal, &check_undefined_arg1); 2126 2127 // Get the untagged integer version of the edx heap number in ecx. 2128 IntegerConvert(masm, 2129 edx, 2130 TypeInfo::Unknown(), 2131 use_sse3, 2132 conversion_failure); 2133 __ mov(edx, ecx); 2134 2135 // Here edx has the untagged integer, eax has a Smi or a heap number. 2136 __ bind(&load_arg2); 2137 2138 // Test if arg2 is a Smi. 2139 __ test(eax, Immediate(kSmiTagMask)); 2140 __ j(not_zero, &arg2_is_object); 2141 2142 __ SmiUntag(eax); 2143 __ mov(ecx, eax); 2144 __ jmp(&done); 2145 2146 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 2147 __ bind(&check_undefined_arg2); 2148 __ cmp(eax, factory->undefined_value()); 2149 __ j(not_equal, conversion_failure); 2150 __ mov(ecx, Immediate(0)); 2151 __ jmp(&done); 2152 2153 __ bind(&arg2_is_object); 2154 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2155 __ cmp(ebx, factory->heap_number_map()); 2156 __ j(not_equal, &check_undefined_arg2); 2157 2158 // Get the untagged integer version of the eax heap number in ecx. 2159 IntegerConvert(masm, 2160 eax, 2161 TypeInfo::Unknown(), 2162 use_sse3, 2163 conversion_failure); 2164 __ bind(&done); 2165 __ mov(eax, edx); 2166 } 2167 2168 2169 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, 2170 TypeInfo type_info, 2171 bool use_sse3, 2172 Label* conversion_failure) { 2173 if (type_info.IsNumber()) { 2174 LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); 2175 } else { 2176 LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); 2177 } 2178 } 2179 2180 2181 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm, 2182 bool use_sse3, 2183 Label* not_int32) { 2184 return; 2185 } 2186 2187 2188 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 2189 Register number) { 2190 NearLabel load_smi, done; 2191 2192 __ test(number, Immediate(kSmiTagMask)); 2193 __ j(zero, &load_smi, not_taken); 2194 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); 2195 __ jmp(&done); 2196 2197 __ bind(&load_smi); 2198 __ SmiUntag(number); 2199 __ push(number); 2200 __ fild_s(Operand(esp, 0)); 2201 __ pop(number); 2202 2203 __ bind(&done); 2204 } 2205 2206 2207 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { 2208 NearLabel load_smi_edx, load_eax, load_smi_eax, done; 2209 // Load operand in edx into xmm0. 2210 __ test(edx, Immediate(kSmiTagMask)); 2211 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. 2212 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2213 2214 __ bind(&load_eax); 2215 // Load operand in eax into xmm1. 2216 __ test(eax, Immediate(kSmiTagMask)); 2217 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. 2218 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2219 __ jmp(&done); 2220 2221 __ bind(&load_smi_edx); 2222 __ SmiUntag(edx); // Untag smi before converting to float. 2223 __ cvtsi2sd(xmm0, Operand(edx)); 2224 __ SmiTag(edx); // Retag smi for heap number overwriting test. 2225 __ jmp(&load_eax); 2226 2227 __ bind(&load_smi_eax); 2228 __ SmiUntag(eax); // Untag smi before converting to float. 2229 __ cvtsi2sd(xmm1, Operand(eax)); 2230 __ SmiTag(eax); // Retag smi for heap number overwriting test. 2231 2232 __ bind(&done); 2233 } 2234 2235 2236 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, 2237 Label* not_numbers) { 2238 NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; 2239 // Load operand in edx into xmm0, or branch to not_numbers. 2240 __ test(edx, Immediate(kSmiTagMask)); 2241 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. 2242 Factory* factory = masm->isolate()->factory(); 2243 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map()); 2244 __ j(not_equal, not_numbers); // Argument in edx is not a number. 2245 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2246 __ bind(&load_eax); 2247 // Load operand in eax into xmm1, or branch to not_numbers. 2248 __ test(eax, Immediate(kSmiTagMask)); 2249 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. 2250 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map()); 2251 __ j(equal, &load_float_eax); 2252 __ jmp(not_numbers); // Argument in eax is not a number. 2253 __ bind(&load_smi_edx); 2254 __ SmiUntag(edx); // Untag smi before converting to float. 2255 __ cvtsi2sd(xmm0, Operand(edx)); 2256 __ SmiTag(edx); // Retag smi for heap number overwriting test. 2257 __ jmp(&load_eax); 2258 __ bind(&load_smi_eax); 2259 __ SmiUntag(eax); // Untag smi before converting to float. 2260 __ cvtsi2sd(xmm1, Operand(eax)); 2261 __ SmiTag(eax); // Retag smi for heap number overwriting test. 2262 __ jmp(&done); 2263 __ bind(&load_float_eax); 2264 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2265 __ bind(&done); 2266 } 2267 2268 2269 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, 2270 Register scratch) { 2271 const Register left = edx; 2272 const Register right = eax; 2273 __ mov(scratch, left); 2274 ASSERT(!scratch.is(right)); // We're about to clobber scratch. 2275 __ SmiUntag(scratch); 2276 __ cvtsi2sd(xmm0, Operand(scratch)); 2277 2278 __ mov(scratch, right); 2279 __ SmiUntag(scratch); 2280 __ cvtsi2sd(xmm1, Operand(scratch)); 2281 } 2282 2283 2284 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, 2285 Label* non_int32, 2286 Register scratch) { 2287 __ cvttsd2si(scratch, Operand(xmm0)); 2288 __ cvtsi2sd(xmm2, Operand(scratch)); 2289 __ ucomisd(xmm0, xmm2); 2290 __ j(not_zero, non_int32); 2291 __ j(carry, non_int32); 2292 __ cvttsd2si(scratch, Operand(xmm1)); 2293 __ cvtsi2sd(xmm2, Operand(scratch)); 2294 __ ucomisd(xmm1, xmm2); 2295 __ j(not_zero, non_int32); 2296 __ j(carry, non_int32); 2297 } 2298 2299 2300 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 2301 Register scratch, 2302 ArgLocation arg_location) { 2303 NearLabel load_smi_1, load_smi_2, done_load_1, done; 2304 if (arg_location == ARGS_IN_REGISTERS) { 2305 __ mov(scratch, edx); 2306 } else { 2307 __ mov(scratch, Operand(esp, 2 * kPointerSize)); 2308 } 2309 __ test(scratch, Immediate(kSmiTagMask)); 2310 __ j(zero, &load_smi_1, not_taken); 2311 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); 2312 __ bind(&done_load_1); 2313 2314 if (arg_location == ARGS_IN_REGISTERS) { 2315 __ mov(scratch, eax); 2316 } else { 2317 __ mov(scratch, Operand(esp, 1 * kPointerSize)); 2318 } 2319 __ test(scratch, Immediate(kSmiTagMask)); 2320 __ j(zero, &load_smi_2, not_taken); 2321 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); 2322 __ jmp(&done); 2323 2324 __ bind(&load_smi_1); 2325 __ SmiUntag(scratch); 2326 __ push(scratch); 2327 __ fild_s(Operand(esp, 0)); 2328 __ pop(scratch); 2329 __ jmp(&done_load_1); 2330 2331 __ bind(&load_smi_2); 2332 __ SmiUntag(scratch); 2333 __ push(scratch); 2334 __ fild_s(Operand(esp, 0)); 2335 __ pop(scratch); 2336 2337 __ bind(&done); 2338 } 2339 2340 2341 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, 2342 Register scratch) { 2343 const Register left = edx; 2344 const Register right = eax; 2345 __ mov(scratch, left); 2346 ASSERT(!scratch.is(right)); // We're about to clobber scratch. 2347 __ SmiUntag(scratch); 2348 __ push(scratch); 2349 __ fild_s(Operand(esp, 0)); 2350 2351 __ mov(scratch, right); 2352 __ SmiUntag(scratch); 2353 __ mov(Operand(esp, 0), scratch); 2354 __ fild_s(Operand(esp, 0)); 2355 __ pop(scratch); 2356 } 2357 2358 2359 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, 2360 Label* non_float, 2361 Register scratch) { 2362 NearLabel test_other, done; 2363 // Test if both operands are floats or smi -> scratch=k_is_float; 2364 // Otherwise scratch = k_not_float. 2365 __ test(edx, Immediate(kSmiTagMask)); 2366 __ j(zero, &test_other, not_taken); // argument in edx is OK 2367 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); 2368 Factory* factory = masm->isolate()->factory(); 2369 __ cmp(scratch, factory->heap_number_map()); 2370 __ j(not_equal, non_float); // argument in edx is not a number -> NaN 2371 2372 __ bind(&test_other); 2373 __ test(eax, Immediate(kSmiTagMask)); 2374 __ j(zero, &done); // argument in eax is OK 2375 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); 2376 __ cmp(scratch, factory->heap_number_map()); 2377 __ j(not_equal, non_float); // argument in eax is not a number -> NaN 2378 2379 // Fall-through: Both operands are numbers. 2380 __ bind(&done); 2381 } 2382 2383 2384 void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm, 2385 Label* non_int32) { 2386 return; 2387 } 2388 2389 2390 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { 2391 Label slow, done, undo; 2392 2393 if (op_ == Token::SUB) { 2394 if (include_smi_code_) { 2395 // Check whether the value is a smi. 2396 NearLabel try_float; 2397 __ test(eax, Immediate(kSmiTagMask)); 2398 __ j(not_zero, &try_float, not_taken); 2399 2400 if (negative_zero_ == kStrictNegativeZero) { 2401 // Go slow case if the value of the expression is zero 2402 // to make sure that we switch between 0 and -0. 2403 __ test(eax, Operand(eax)); 2404 __ j(zero, &slow, not_taken); 2405 } 2406 2407 // The value of the expression is a smi that is not zero. Try 2408 // optimistic subtraction '0 - value'. 2409 __ mov(edx, Operand(eax)); 2410 __ Set(eax, Immediate(0)); 2411 __ sub(eax, Operand(edx)); 2412 __ j(overflow, &undo, not_taken); 2413 __ StubReturn(1); 2414 2415 // Try floating point case. 2416 __ bind(&try_float); 2417 } else if (FLAG_debug_code) { 2418 __ AbortIfSmi(eax); 2419 } 2420 2421 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); 2422 __ cmp(edx, masm->isolate()->factory()->heap_number_map()); 2423 __ j(not_equal, &slow); 2424 if (overwrite_ == UNARY_OVERWRITE) { 2425 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); 2426 __ xor_(edx, HeapNumber::kSignMask); // Flip sign. 2427 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx); 2428 } else { 2429 __ mov(edx, Operand(eax)); 2430 // edx: operand 2431 __ AllocateHeapNumber(eax, ebx, ecx, &undo); 2432 // eax: allocated 'empty' number 2433 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); 2434 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign. 2435 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx); 2436 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset)); 2437 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); 2438 } 2439 } else if (op_ == Token::BIT_NOT) { 2440 if (include_smi_code_) { 2441 Label non_smi; 2442 __ test(eax, Immediate(kSmiTagMask)); 2443 __ j(not_zero, &non_smi); 2444 __ not_(eax); 2445 __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag. 2446 __ ret(0); 2447 __ bind(&non_smi); 2448 } else if (FLAG_debug_code) { 2449 __ AbortIfSmi(eax); 2450 } 2451 2452 // Check if the operand is a heap number. 2453 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); 2454 __ cmp(edx, masm->isolate()->factory()->heap_number_map()); 2455 __ j(not_equal, &slow, not_taken); 2456 2457 // Convert the heap number in eax to an untagged integer in ecx. 2458 IntegerConvert(masm, 2459 eax, 2460 TypeInfo::Unknown(), 2461 CpuFeatures::IsSupported(SSE3), 2462 &slow); 2463 2464 // Do the bitwise operation and check if the result fits in a smi. 2465 NearLabel try_float; 2466 __ not_(ecx); 2467 __ cmp(ecx, 0xc0000000); 2468 __ j(sign, &try_float, not_taken); 2469 2470 // Tag the result as a smi and we're done. 2471 STATIC_ASSERT(kSmiTagSize == 1); 2472 __ lea(eax, Operand(ecx, times_2, kSmiTag)); 2473 __ jmp(&done); 2474 2475 // Try to store the result in a heap number. 2476 __ bind(&try_float); 2477 if (overwrite_ == UNARY_NO_OVERWRITE) { 2478 // Allocate a fresh heap number, but don't overwrite eax until 2479 // we're sure we can do it without going through the slow case 2480 // that needs the value in eax. 2481 __ AllocateHeapNumber(ebx, edx, edi, &slow); 2482 __ mov(eax, Operand(ebx)); 2483 } 2484 if (CpuFeatures::IsSupported(SSE2)) { 2485 CpuFeatures::Scope use_sse2(SSE2); 2486 __ cvtsi2sd(xmm0, Operand(ecx)); 2487 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 2488 } else { 2489 __ push(ecx); 2490 __ fild_s(Operand(esp, 0)); 2491 __ pop(ecx); 2492 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 2493 } 2494 } else { 2495 UNIMPLEMENTED(); 2496 } 2497 2498 // Return from the stub. 2499 __ bind(&done); 2500 __ StubReturn(1); 2501 2502 // Restore eax and go slow case. 2503 __ bind(&undo); 2504 __ mov(eax, Operand(edx)); 2505 2506 // Handle the slow case by jumping to the JavaScript builtin. 2507 __ bind(&slow); 2508 __ pop(ecx); // pop return address. 2509 __ push(eax); 2510 __ push(ecx); // push return address 2511 switch (op_) { 2512 case Token::SUB: 2513 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); 2514 break; 2515 case Token::BIT_NOT: 2516 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); 2517 break; 2518 default: 2519 UNREACHABLE(); 2520 } 2521 } 2522 2523 2524 void MathPowStub::Generate(MacroAssembler* masm) { 2525 // Registers are used as follows: 2526 // edx = base 2527 // eax = exponent 2528 // ecx = temporary, result 2529 2530 CpuFeatures::Scope use_sse2(SSE2); 2531 Label allocate_return, call_runtime; 2532 2533 // Load input parameters. 2534 __ mov(edx, Operand(esp, 2 * kPointerSize)); 2535 __ mov(eax, Operand(esp, 1 * kPointerSize)); 2536 2537 // Save 1 in xmm3 - we need this several times later on. 2538 __ mov(ecx, Immediate(1)); 2539 __ cvtsi2sd(xmm3, Operand(ecx)); 2540 2541 Label exponent_nonsmi; 2542 Label base_nonsmi; 2543 // If the exponent is a heap number go to that specific case. 2544 __ test(eax, Immediate(kSmiTagMask)); 2545 __ j(not_zero, &exponent_nonsmi); 2546 __ test(edx, Immediate(kSmiTagMask)); 2547 __ j(not_zero, &base_nonsmi); 2548 2549 // Optimized version when both exponent and base are smis. 2550 Label powi; 2551 __ SmiUntag(edx); 2552 __ cvtsi2sd(xmm0, Operand(edx)); 2553 __ jmp(&powi); 2554 // exponent is smi and base is a heapnumber. 2555 __ bind(&base_nonsmi); 2556 Factory* factory = masm->isolate()->factory(); 2557 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), 2558 factory->heap_number_map()); 2559 __ j(not_equal, &call_runtime); 2560 2561 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2562 2563 // Optimized version of pow if exponent is a smi. 2564 // xmm0 contains the base. 2565 __ bind(&powi); 2566 __ SmiUntag(eax); 2567 2568 // Save exponent in base as we need to check if exponent is negative later. 2569 // We know that base and exponent are in different registers. 2570 __ mov(edx, eax); 2571 2572 // Get absolute value of exponent. 2573 NearLabel no_neg; 2574 __ cmp(eax, 0); 2575 __ j(greater_equal, &no_neg); 2576 __ neg(eax); 2577 __ bind(&no_neg); 2578 2579 // Load xmm1 with 1. 2580 __ movsd(xmm1, xmm3); 2581 NearLabel while_true; 2582 NearLabel no_multiply; 2583 2584 __ bind(&while_true); 2585 __ shr(eax, 1); 2586 __ j(not_carry, &no_multiply); 2587 __ mulsd(xmm1, xmm0); 2588 __ bind(&no_multiply); 2589 __ mulsd(xmm0, xmm0); 2590 __ j(not_zero, &while_true); 2591 2592 // base has the original value of the exponent - if the exponent is 2593 // negative return 1/result. 2594 __ test(edx, Operand(edx)); 2595 __ j(positive, &allocate_return); 2596 // Special case if xmm1 has reached infinity. 2597 __ mov(ecx, Immediate(0x7FB00000)); 2598 __ movd(xmm0, Operand(ecx)); 2599 __ cvtss2sd(xmm0, xmm0); 2600 __ ucomisd(xmm0, xmm1); 2601 __ j(equal, &call_runtime); 2602 __ divsd(xmm3, xmm1); 2603 __ movsd(xmm1, xmm3); 2604 __ jmp(&allocate_return); 2605 2606 // exponent (or both) is a heapnumber - no matter what we should now work 2607 // on doubles. 2608 __ bind(&exponent_nonsmi); 2609 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), 2610 factory->heap_number_map()); 2611 __ j(not_equal, &call_runtime); 2612 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2613 // Test if exponent is nan. 2614 __ ucomisd(xmm1, xmm1); 2615 __ j(parity_even, &call_runtime); 2616 2617 NearLabel base_not_smi; 2618 NearLabel handle_special_cases; 2619 __ test(edx, Immediate(kSmiTagMask)); 2620 __ j(not_zero, &base_not_smi); 2621 __ SmiUntag(edx); 2622 __ cvtsi2sd(xmm0, Operand(edx)); 2623 __ jmp(&handle_special_cases); 2624 2625 __ bind(&base_not_smi); 2626 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), 2627 factory->heap_number_map()); 2628 __ j(not_equal, &call_runtime); 2629 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); 2630 __ and_(ecx, HeapNumber::kExponentMask); 2631 __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask)); 2632 // base is NaN or +/-Infinity 2633 __ j(greater_equal, &call_runtime); 2634 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2635 2636 // base is in xmm0 and exponent is in xmm1. 2637 __ bind(&handle_special_cases); 2638 NearLabel not_minus_half; 2639 // Test for -0.5. 2640 // Load xmm2 with -0.5. 2641 __ mov(ecx, Immediate(0xBF000000)); 2642 __ movd(xmm2, Operand(ecx)); 2643 __ cvtss2sd(xmm2, xmm2); 2644 // xmm2 now has -0.5. 2645 __ ucomisd(xmm2, xmm1); 2646 __ j(not_equal, ¬_minus_half); 2647 2648 // Calculates reciprocal of square root. 2649 // sqrtsd returns -0 when input is -0. ECMA spec requires +0. 2650 __ xorpd(xmm1, xmm1); 2651 __ addsd(xmm1, xmm0); 2652 __ sqrtsd(xmm1, xmm1); 2653 __ divsd(xmm3, xmm1); 2654 __ movsd(xmm1, xmm3); 2655 __ jmp(&allocate_return); 2656 2657 // Test for 0.5. 2658 __ bind(¬_minus_half); 2659 // Load xmm2 with 0.5. 2660 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. 2661 __ addsd(xmm2, xmm3); 2662 // xmm2 now has 0.5. 2663 __ ucomisd(xmm2, xmm1); 2664 __ j(not_equal, &call_runtime); 2665 // Calculates square root. 2666 // sqrtsd returns -0 when input is -0. ECMA spec requires +0. 2667 __ xorpd(xmm1, xmm1); 2668 __ addsd(xmm1, xmm0); 2669 __ sqrtsd(xmm1, xmm1); 2670 2671 __ bind(&allocate_return); 2672 __ AllocateHeapNumber(ecx, eax, edx, &call_runtime); 2673 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1); 2674 __ mov(eax, ecx); 2675 __ ret(2 * kPointerSize); 2676 2677 __ bind(&call_runtime); 2678 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); 2679 } 2680 2681 2682 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { 2683 // The key is in edx and the parameter count is in eax. 2684 2685 // The displacement is used for skipping the frame pointer on the 2686 // stack. It is the offset of the last parameter (if any) relative 2687 // to the frame pointer. 2688 static const int kDisplacement = 1 * kPointerSize; 2689 2690 // Check that the key is a smi. 2691 Label slow; 2692 __ test(edx, Immediate(kSmiTagMask)); 2693 __ j(not_zero, &slow, not_taken); 2694 2695 // Check if the calling frame is an arguments adaptor frame. 2696 NearLabel adaptor; 2697 __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2698 __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); 2699 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 2700 __ j(equal, &adaptor); 2701 2702 // Check index against formal parameters count limit passed in 2703 // through register eax. Use unsigned comparison to get negative 2704 // check for free. 2705 __ cmp(edx, Operand(eax)); 2706 __ j(above_equal, &slow, not_taken); 2707 2708 // Read the argument from the stack and return it. 2709 STATIC_ASSERT(kSmiTagSize == 1); 2710 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. 2711 __ lea(ebx, Operand(ebp, eax, times_2, 0)); 2712 __ neg(edx); 2713 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); 2714 __ ret(0); 2715 2716 // Arguments adaptor case: Check index against actual arguments 2717 // limit found in the arguments adaptor frame. Use unsigned 2718 // comparison to get negative check for free. 2719 __ bind(&adaptor); 2720 __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 2721 __ cmp(edx, Operand(ecx)); 2722 __ j(above_equal, &slow, not_taken); 2723 2724 // Read the argument from the stack and return it. 2725 STATIC_ASSERT(kSmiTagSize == 1); 2726 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. 2727 __ lea(ebx, Operand(ebx, ecx, times_2, 0)); 2728 __ neg(edx); 2729 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); 2730 __ ret(0); 2731 2732 // Slow-case: Handle non-smi or out-of-bounds access to arguments 2733 // by calling the runtime system. 2734 __ bind(&slow); 2735 __ pop(ebx); // Return address. 2736 __ push(edx); 2737 __ push(ebx); 2738 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); 2739 } 2740 2741 2742 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { 2743 // esp[0] : return address 2744 // esp[4] : number of parameters 2745 // esp[8] : receiver displacement 2746 // esp[16] : function 2747 2748 // The displacement is used for skipping the return address and the 2749 // frame pointer on the stack. It is the offset of the last 2750 // parameter (if any) relative to the frame pointer. 2751 static const int kDisplacement = 2 * kPointerSize; 2752 2753 // Check if the calling frame is an arguments adaptor frame. 2754 Label adaptor_frame, try_allocate, runtime; 2755 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); 2756 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); 2757 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 2758 __ j(equal, &adaptor_frame); 2759 2760 // Get the length from the frame. 2761 __ mov(ecx, Operand(esp, 1 * kPointerSize)); 2762 __ jmp(&try_allocate); 2763 2764 // Patch the arguments.length and the parameters pointer. 2765 __ bind(&adaptor_frame); 2766 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 2767 __ mov(Operand(esp, 1 * kPointerSize), ecx); 2768 __ lea(edx, Operand(edx, ecx, times_2, kDisplacement)); 2769 __ mov(Operand(esp, 2 * kPointerSize), edx); 2770 2771 // Try the new space allocation. Start out with computing the size of 2772 // the arguments object and the elements array. 2773 NearLabel add_arguments_object; 2774 __ bind(&try_allocate); 2775 __ test(ecx, Operand(ecx)); 2776 __ j(zero, &add_arguments_object); 2777 __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); 2778 __ bind(&add_arguments_object); 2779 __ add(Operand(ecx), Immediate(GetArgumentsObjectSize())); 2780 2781 // Do the allocation of both objects in one go. 2782 __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); 2783 2784 // Get the arguments boilerplate from the current (global) context. 2785 __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); 2786 __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); 2787 __ mov(edi, Operand(edi, 2788 Context::SlotOffset(GetArgumentsBoilerplateIndex()))); 2789 2790 // Copy the JS object part. 2791 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { 2792 __ mov(ebx, FieldOperand(edi, i)); 2793 __ mov(FieldOperand(eax, i), ebx); 2794 } 2795 2796 if (type_ == NEW_NON_STRICT) { 2797 // Setup the callee in-object property. 2798 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); 2799 __ mov(ebx, Operand(esp, 3 * kPointerSize)); 2800 __ mov(FieldOperand(eax, JSObject::kHeaderSize + 2801 Heap::kArgumentsCalleeIndex * kPointerSize), 2802 ebx); 2803 } 2804 2805 // Get the length (smi tagged) and set that as an in-object property too. 2806 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 2807 __ mov(ecx, Operand(esp, 1 * kPointerSize)); 2808 __ mov(FieldOperand(eax, JSObject::kHeaderSize + 2809 Heap::kArgumentsLengthIndex * kPointerSize), 2810 ecx); 2811 2812 // If there are no actual arguments, we're done. 2813 Label done; 2814 __ test(ecx, Operand(ecx)); 2815 __ j(zero, &done); 2816 2817 // Get the parameters pointer from the stack. 2818 __ mov(edx, Operand(esp, 2 * kPointerSize)); 2819 2820 // Setup the elements pointer in the allocated arguments object and 2821 // initialize the header in the elements fixed array. 2822 __ lea(edi, Operand(eax, GetArgumentsObjectSize())); 2823 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); 2824 __ mov(FieldOperand(edi, FixedArray::kMapOffset), 2825 Immediate(masm->isolate()->factory()->fixed_array_map())); 2826 2827 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); 2828 // Untag the length for the loop below. 2829 __ SmiUntag(ecx); 2830 2831 // Copy the fixed array slots. 2832 NearLabel loop; 2833 __ bind(&loop); 2834 __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. 2835 __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); 2836 __ add(Operand(edi), Immediate(kPointerSize)); 2837 __ sub(Operand(edx), Immediate(kPointerSize)); 2838 __ dec(ecx); 2839 __ j(not_zero, &loop); 2840 2841 // Return and remove the on-stack parameters. 2842 __ bind(&done); 2843 __ ret(3 * kPointerSize); 2844 2845 // Do the runtime call to allocate the arguments object. 2846 __ bind(&runtime); 2847 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 2848 } 2849 2850 2851 void RegExpExecStub::Generate(MacroAssembler* masm) { 2852 // Just jump directly to runtime if native RegExp is not selected at compile 2853 // time or if regexp entry in generated code is turned off runtime switch or 2854 // at compilation. 2855 #ifdef V8_INTERPRETED_REGEXP 2856 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); 2857 #else // V8_INTERPRETED_REGEXP 2858 if (!FLAG_regexp_entry_native) { 2859 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); 2860 return; 2861 } 2862 2863 // Stack frame on entry. 2864 // esp[0]: return address 2865 // esp[4]: last_match_info (expected JSArray) 2866 // esp[8]: previous index 2867 // esp[12]: subject string 2868 // esp[16]: JSRegExp object 2869 2870 static const int kLastMatchInfoOffset = 1 * kPointerSize; 2871 static const int kPreviousIndexOffset = 2 * kPointerSize; 2872 static const int kSubjectOffset = 3 * kPointerSize; 2873 static const int kJSRegExpOffset = 4 * kPointerSize; 2874 2875 Label runtime, invoke_regexp; 2876 2877 // Ensure that a RegExp stack is allocated. 2878 ExternalReference address_of_regexp_stack_memory_address = 2879 ExternalReference::address_of_regexp_stack_memory_address( 2880 masm->isolate()); 2881 ExternalReference address_of_regexp_stack_memory_size = 2882 ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); 2883 __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); 2884 __ test(ebx, Operand(ebx)); 2885 __ j(zero, &runtime, not_taken); 2886 2887 // Check that the first argument is a JSRegExp object. 2888 __ mov(eax, Operand(esp, kJSRegExpOffset)); 2889 STATIC_ASSERT(kSmiTag == 0); 2890 __ test(eax, Immediate(kSmiTagMask)); 2891 __ j(zero, &runtime); 2892 __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); 2893 __ j(not_equal, &runtime); 2894 // Check that the RegExp has been compiled (data contains a fixed array). 2895 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); 2896 if (FLAG_debug_code) { 2897 __ test(ecx, Immediate(kSmiTagMask)); 2898 __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected"); 2899 __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx); 2900 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected"); 2901 } 2902 2903 // ecx: RegExp data (FixedArray) 2904 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. 2905 __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); 2906 __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); 2907 __ j(not_equal, &runtime); 2908 2909 // ecx: RegExp data (FixedArray) 2910 // Check that the number of captures fit in the static offsets vector buffer. 2911 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); 2912 // Calculate number of capture registers (number_of_captures + 1) * 2. This 2913 // uses the asumption that smis are 2 * their untagged value. 2914 STATIC_ASSERT(kSmiTag == 0); 2915 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 2916 __ add(Operand(edx), Immediate(2)); // edx was a smi. 2917 // Check that the static offsets vector buffer is large enough. 2918 __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); 2919 __ j(above, &runtime); 2920 2921 // ecx: RegExp data (FixedArray) 2922 // edx: Number of capture registers 2923 // Check that the second argument is a string. 2924 __ mov(eax, Operand(esp, kSubjectOffset)); 2925 __ test(eax, Immediate(kSmiTagMask)); 2926 __ j(zero, &runtime); 2927 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); 2928 __ j(NegateCondition(is_string), &runtime); 2929 // Get the length of the string to ebx. 2930 __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); 2931 2932 // ebx: Length of subject string as a smi 2933 // ecx: RegExp data (FixedArray) 2934 // edx: Number of capture registers 2935 // Check that the third argument is a positive smi less than the subject 2936 // string length. A negative value will be greater (unsigned comparison). 2937 __ mov(eax, Operand(esp, kPreviousIndexOffset)); 2938 __ test(eax, Immediate(kSmiTagMask)); 2939 __ j(not_zero, &runtime); 2940 __ cmp(eax, Operand(ebx)); 2941 __ j(above_equal, &runtime); 2942 2943 // ecx: RegExp data (FixedArray) 2944 // edx: Number of capture registers 2945 // Check that the fourth object is a JSArray object. 2946 __ mov(eax, Operand(esp, kLastMatchInfoOffset)); 2947 __ test(eax, Immediate(kSmiTagMask)); 2948 __ j(zero, &runtime); 2949 __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); 2950 __ j(not_equal, &runtime); 2951 // Check that the JSArray is in fast case. 2952 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); 2953 __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); 2954 Factory* factory = masm->isolate()->factory(); 2955 __ cmp(eax, factory->fixed_array_map()); 2956 __ j(not_equal, &runtime); 2957 // Check that the last match info has space for the capture registers and the 2958 // additional information. 2959 __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); 2960 __ SmiUntag(eax); 2961 __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); 2962 __ cmp(edx, Operand(eax)); 2963 __ j(greater, &runtime); 2964 2965 // ecx: RegExp data (FixedArray) 2966 // Check the representation and encoding of the subject string. 2967 Label seq_ascii_string, seq_two_byte_string, check_code; 2968 __ mov(eax, Operand(esp, kSubjectOffset)); 2969 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2970 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); 2971 // First check for flat two byte string. 2972 __ and_(ebx, 2973 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); 2974 STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); 2975 __ j(zero, &seq_two_byte_string); 2976 // Any other flat string must be a flat ascii string. 2977 __ test(Operand(ebx), 2978 Immediate(kIsNotStringMask | kStringRepresentationMask)); 2979 __ j(zero, &seq_ascii_string); 2980 2981 // Check for flat cons string. 2982 // A flat cons string is a cons string where the second part is the empty 2983 // string. In that case the subject string is just the first part of the cons 2984 // string. Also in this case the first part of the cons string is known to be 2985 // a sequential string or an external string. 2986 STATIC_ASSERT(kExternalStringTag != 0); 2987 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); 2988 __ test(Operand(ebx), 2989 Immediate(kIsNotStringMask | kExternalStringTag)); 2990 __ j(not_zero, &runtime); 2991 // String is a cons string. 2992 __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset)); 2993 __ cmp(Operand(edx), factory->empty_string()); 2994 __ j(not_equal, &runtime); 2995 __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); 2996 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2997 // String is a cons string with empty second part. 2998 // eax: first part of cons string. 2999 // ebx: map of first part of cons string. 3000 // Is first part a flat two byte string? 3001 __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), 3002 kStringRepresentationMask | kStringEncodingMask); 3003 STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); 3004 __ j(zero, &seq_two_byte_string); 3005 // Any other flat string must be ascii. 3006 __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), 3007 kStringRepresentationMask); 3008 __ j(not_zero, &runtime); 3009 3010 __ bind(&seq_ascii_string); 3011 // eax: subject string (flat ascii) 3012 // ecx: RegExp data (FixedArray) 3013 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); 3014 __ Set(edi, Immediate(1)); // Type is ascii. 3015 __ jmp(&check_code); 3016 3017 __ bind(&seq_two_byte_string); 3018 // eax: subject string (flat two byte) 3019 // ecx: RegExp data (FixedArray) 3020 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); 3021 __ Set(edi, Immediate(0)); // Type is two byte. 3022 3023 __ bind(&check_code); 3024 // Check that the irregexp code has been generated for the actual string 3025 // encoding. If it has, the field contains a code object otherwise it contains 3026 // the hole. 3027 __ CmpObjectType(edx, CODE_TYPE, ebx); 3028 __ j(not_equal, &runtime); 3029 3030 // eax: subject string 3031 // edx: code 3032 // edi: encoding of subject string (1 if ascii, 0 if two_byte); 3033 // Load used arguments before starting to push arguments for call to native 3034 // RegExp code to avoid handling changing stack height. 3035 __ mov(ebx, Operand(esp, kPreviousIndexOffset)); 3036 __ SmiUntag(ebx); // Previous index from smi. 3037 3038 // eax: subject string 3039 // ebx: previous index 3040 // edx: code 3041 // edi: encoding of subject string (1 if ascii 0 if two_byte); 3042 // All checks done. Now push arguments for native regexp code. 3043 Counters* counters = masm->isolate()->counters(); 3044 __ IncrementCounter(counters->regexp_entry_native(), 1); 3045 3046 // Isolates: note we add an additional parameter here (isolate pointer). 3047 static const int kRegExpExecuteArguments = 8; 3048 __ EnterApiExitFrame(kRegExpExecuteArguments); 3049 3050 // Argument 8: Pass current isolate address. 3051 __ mov(Operand(esp, 7 * kPointerSize), 3052 Immediate(ExternalReference::isolate_address())); 3053 3054 // Argument 7: Indicate that this is a direct call from JavaScript. 3055 __ mov(Operand(esp, 6 * kPointerSize), Immediate(1)); 3056 3057 // Argument 6: Start (high end) of backtracking stack memory area. 3058 __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address)); 3059 __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); 3060 __ mov(Operand(esp, 5 * kPointerSize), ecx); 3061 3062 // Argument 5: static offsets vector buffer. 3063 __ mov(Operand(esp, 4 * kPointerSize), 3064 Immediate(ExternalReference::address_of_static_offsets_vector( 3065 masm->isolate()))); 3066 3067 // Argument 4: End of string data 3068 // Argument 3: Start of string data 3069 NearLabel setup_two_byte, setup_rest; 3070 __ test(edi, Operand(edi)); 3071 __ mov(edi, FieldOperand(eax, String::kLengthOffset)); 3072 __ j(zero, &setup_two_byte); 3073 __ SmiUntag(edi); 3074 __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize)); 3075 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. 3076 __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize)); 3077 __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. 3078 __ jmp(&setup_rest); 3079 3080 __ bind(&setup_two_byte); 3081 STATIC_ASSERT(kSmiTag == 0); 3082 STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2). 3083 __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize)); 3084 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. 3085 __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize)); 3086 __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. 3087 3088 __ bind(&setup_rest); 3089 3090 // Argument 2: Previous index. 3091 __ mov(Operand(esp, 1 * kPointerSize), ebx); 3092 3093 // Argument 1: Subject string. 3094 __ mov(Operand(esp, 0 * kPointerSize), eax); 3095 3096 // Locate the code entry and call it. 3097 __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); 3098 __ call(Operand(edx)); 3099 3100 // Drop arguments and come back to JS mode. 3101 __ LeaveApiExitFrame(); 3102 3103 // Check the result. 3104 Label success; 3105 __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS); 3106 __ j(equal, &success, taken); 3107 Label failure; 3108 __ cmp(eax, NativeRegExpMacroAssembler::FAILURE); 3109 __ j(equal, &failure, taken); 3110 __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION); 3111 // If not exception it can only be retry. Handle that in the runtime system. 3112 __ j(not_equal, &runtime); 3113 // Result must now be exception. If there is no pending exception already a 3114 // stack overflow (on the backtrack stack) was detected in RegExp code but 3115 // haven't created the exception yet. Handle that in the runtime system. 3116 // TODO(592): Rerunning the RegExp to get the stack overflow exception. 3117 ExternalReference pending_exception(Isolate::k_pending_exception_address, 3118 masm->isolate()); 3119 __ mov(edx, 3120 Operand::StaticVariable(ExternalReference::the_hole_value_location( 3121 masm->isolate()))); 3122 __ mov(eax, Operand::StaticVariable(pending_exception)); 3123 __ cmp(edx, Operand(eax)); 3124 __ j(equal, &runtime); 3125 // For exception, throw the exception again. 3126 3127 // Clear the pending exception variable. 3128 __ mov(Operand::StaticVariable(pending_exception), edx); 3129 3130 // Special handling of termination exceptions which are uncatchable 3131 // by javascript code. 3132 __ cmp(eax, factory->termination_exception()); 3133 Label throw_termination_exception; 3134 __ j(equal, &throw_termination_exception); 3135 3136 // Handle normal exception by following handler chain. 3137 __ Throw(eax); 3138 3139 __ bind(&throw_termination_exception); 3140 __ ThrowUncatchable(TERMINATION, eax); 3141 3142 __ bind(&failure); 3143 // For failure to match, return null. 3144 __ mov(Operand(eax), factory->null_value()); 3145 __ ret(4 * kPointerSize); 3146 3147 // Load RegExp data. 3148 __ bind(&success); 3149 __ mov(eax, Operand(esp, kJSRegExpOffset)); 3150 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); 3151 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); 3152 // Calculate number of capture registers (number_of_captures + 1) * 2. 3153 STATIC_ASSERT(kSmiTag == 0); 3154 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 3155 __ add(Operand(edx), Immediate(2)); // edx was a smi. 3156 3157 // edx: Number of capture registers 3158 // Load last_match_info which is still known to be a fast case JSArray. 3159 __ mov(eax, Operand(esp, kLastMatchInfoOffset)); 3160 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); 3161 3162 // ebx: last_match_info backing store (FixedArray) 3163 // edx: number of capture registers 3164 // Store the capture count. 3165 __ SmiTag(edx); // Number of capture registers to smi. 3166 __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); 3167 __ SmiUntag(edx); // Number of capture registers back from smi. 3168 // Store last subject and last input. 3169 __ mov(eax, Operand(esp, kSubjectOffset)); 3170 __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); 3171 __ mov(ecx, ebx); 3172 __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi); 3173 __ mov(eax, Operand(esp, kSubjectOffset)); 3174 __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); 3175 __ mov(ecx, ebx); 3176 __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi); 3177 3178 // Get the static offsets vector filled by the native regexp code. 3179 ExternalReference address_of_static_offsets_vector = 3180 ExternalReference::address_of_static_offsets_vector(masm->isolate()); 3181 __ mov(ecx, Immediate(address_of_static_offsets_vector)); 3182 3183 // ebx: last_match_info backing store (FixedArray) 3184 // ecx: offsets vector 3185 // edx: number of capture registers 3186 NearLabel next_capture, done; 3187 // Capture register counter starts from number of capture registers and 3188 // counts down until wraping after zero. 3189 __ bind(&next_capture); 3190 __ sub(Operand(edx), Immediate(1)); 3191 __ j(negative, &done); 3192 // Read the value from the static offsets vector buffer. 3193 __ mov(edi, Operand(ecx, edx, times_int_size, 0)); 3194 __ SmiTag(edi); 3195 // Store the smi value in the last match info. 3196 __ mov(FieldOperand(ebx, 3197 edx, 3198 times_pointer_size, 3199 RegExpImpl::kFirstCaptureOffset), 3200 edi); 3201 __ jmp(&next_capture); 3202 __ bind(&done); 3203 3204 // Return last match info. 3205 __ mov(eax, Operand(esp, kLastMatchInfoOffset)); 3206 __ ret(4 * kPointerSize); 3207 3208 // Do the runtime call to execute the regexp. 3209 __ bind(&runtime); 3210 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); 3211 #endif // V8_INTERPRETED_REGEXP 3212 } 3213 3214 3215 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { 3216 const int kMaxInlineLength = 100; 3217 Label slowcase; 3218 NearLabel done; 3219 __ mov(ebx, Operand(esp, kPointerSize * 3)); 3220 __ test(ebx, Immediate(kSmiTagMask)); 3221 __ j(not_zero, &slowcase); 3222 __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength))); 3223 __ j(above, &slowcase); 3224 // Smi-tagging is equivalent to multiplying by 2. 3225 STATIC_ASSERT(kSmiTag == 0); 3226 STATIC_ASSERT(kSmiTagSize == 1); 3227 // Allocate RegExpResult followed by FixedArray with size in ebx. 3228 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] 3229 // Elements: [Map][Length][..elements..] 3230 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, 3231 times_half_pointer_size, 3232 ebx, // In: Number of elements (times 2, being a smi) 3233 eax, // Out: Start of allocation (tagged). 3234 ecx, // Out: End of allocation. 3235 edx, // Scratch register 3236 &slowcase, 3237 TAG_OBJECT); 3238 // eax: Start of allocated area, object-tagged. 3239 3240 // Set JSArray map to global.regexp_result_map(). 3241 // Set empty properties FixedArray. 3242 // Set elements to point to FixedArray allocated right after the JSArray. 3243 // Interleave operations for better latency. 3244 __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX)); 3245 Factory* factory = masm->isolate()->factory(); 3246 __ mov(ecx, Immediate(factory->empty_fixed_array())); 3247 __ lea(ebx, Operand(eax, JSRegExpResult::kSize)); 3248 __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset)); 3249 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); 3250 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx); 3251 __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX)); 3252 __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx); 3253 3254 // Set input, index and length fields from arguments. 3255 __ mov(ecx, Operand(esp, kPointerSize * 1)); 3256 __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx); 3257 __ mov(ecx, Operand(esp, kPointerSize * 2)); 3258 __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx); 3259 __ mov(ecx, Operand(esp, kPointerSize * 3)); 3260 __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx); 3261 3262 // Fill out the elements FixedArray. 3263 // eax: JSArray. 3264 // ebx: FixedArray. 3265 // ecx: Number of elements in array, as smi. 3266 3267 // Set map. 3268 __ mov(FieldOperand(ebx, HeapObject::kMapOffset), 3269 Immediate(factory->fixed_array_map())); 3270 // Set length. 3271 __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx); 3272 // Fill contents of fixed-array with the-hole. 3273 __ SmiUntag(ecx); 3274 __ mov(edx, Immediate(factory->the_hole_value())); 3275 __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize)); 3276 // Fill fixed array elements with hole. 3277 // eax: JSArray. 3278 // ecx: Number of elements to fill. 3279 // ebx: Start of elements in FixedArray. 3280 // edx: the hole. 3281 Label loop; 3282 __ test(ecx, Operand(ecx)); 3283 __ bind(&loop); 3284 __ j(less_equal, &done); // Jump if ecx is negative or zero. 3285 __ sub(Operand(ecx), Immediate(1)); 3286 __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx); 3287 __ jmp(&loop); 3288 3289 __ bind(&done); 3290 __ ret(3 * kPointerSize); 3291 3292 __ bind(&slowcase); 3293 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); 3294 } 3295 3296 3297 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, 3298 Register object, 3299 Register result, 3300 Register scratch1, 3301 Register scratch2, 3302 bool object_is_smi, 3303 Label* not_found) { 3304 // Use of registers. Register result is used as a temporary. 3305 Register number_string_cache = result; 3306 Register mask = scratch1; 3307 Register scratch = scratch2; 3308 3309 // Load the number string cache. 3310 ExternalReference roots_address = 3311 ExternalReference::roots_address(masm->isolate()); 3312 __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex)); 3313 __ mov(number_string_cache, 3314 Operand::StaticArray(scratch, times_pointer_size, roots_address)); 3315 // Make the hash mask from the length of the number string cache. It 3316 // contains two elements (number and string) for each cache entry. 3317 __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); 3318 __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. 3319 __ sub(Operand(mask), Immediate(1)); // Make mask. 3320 3321 // Calculate the entry in the number string cache. The hash value in the 3322 // number string cache for smis is just the smi value, and the hash for 3323 // doubles is the xor of the upper and lower words. See 3324 // Heap::GetNumberStringCache. 3325 NearLabel smi_hash_calculated; 3326 NearLabel load_result_from_cache; 3327 if (object_is_smi) { 3328 __ mov(scratch, object); 3329 __ SmiUntag(scratch); 3330 } else { 3331 NearLabel not_smi, hash_calculated; 3332 STATIC_ASSERT(kSmiTag == 0); 3333 __ test(object, Immediate(kSmiTagMask)); 3334 __ j(not_zero, ¬_smi); 3335 __ mov(scratch, object); 3336 __ SmiUntag(scratch); 3337 __ jmp(&smi_hash_calculated); 3338 __ bind(¬_smi); 3339 __ cmp(FieldOperand(object, HeapObject::kMapOffset), 3340 masm->isolate()->factory()->heap_number_map()); 3341 __ j(not_equal, not_found); 3342 STATIC_ASSERT(8 == kDoubleSize); 3343 __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); 3344 __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); 3345 // Object is heap number and hash is now in scratch. Calculate cache index. 3346 __ and_(scratch, Operand(mask)); 3347 Register index = scratch; 3348 Register probe = mask; 3349 __ mov(probe, 3350 FieldOperand(number_string_cache, 3351 index, 3352 times_twice_pointer_size, 3353 FixedArray::kHeaderSize)); 3354 __ test(probe, Immediate(kSmiTagMask)); 3355 __ j(zero, not_found); 3356 if (CpuFeatures::IsSupported(SSE2)) { 3357 CpuFeatures::Scope fscope(SSE2); 3358 __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); 3359 __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); 3360 __ ucomisd(xmm0, xmm1); 3361 } else { 3362 __ fld_d(FieldOperand(object, HeapNumber::kValueOffset)); 3363 __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); 3364 __ FCmp(); 3365 } 3366 __ j(parity_even, not_found); // Bail out if NaN is involved. 3367 __ j(not_equal, not_found); // The cache did not contain this value. 3368 __ jmp(&load_result_from_cache); 3369 } 3370 3371 __ bind(&smi_hash_calculated); 3372 // Object is smi and hash is now in scratch. Calculate cache index. 3373 __ and_(scratch, Operand(mask)); 3374 Register index = scratch; 3375 // Check if the entry is the smi we are looking for. 3376 __ cmp(object, 3377 FieldOperand(number_string_cache, 3378 index, 3379 times_twice_pointer_size, 3380 FixedArray::kHeaderSize)); 3381 __ j(not_equal, not_found); 3382 3383 // Get the result from the cache. 3384 __ bind(&load_result_from_cache); 3385 __ mov(result, 3386 FieldOperand(number_string_cache, 3387 index, 3388 times_twice_pointer_size, 3389 FixedArray::kHeaderSize + kPointerSize)); 3390 Counters* counters = masm->isolate()->counters(); 3391 __ IncrementCounter(counters->number_to_string_native(), 1); 3392 } 3393 3394 3395 void NumberToStringStub::Generate(MacroAssembler* masm) { 3396 Label runtime; 3397 3398 __ mov(ebx, Operand(esp, kPointerSize)); 3399 3400 // Generate code to lookup number in the number string cache. 3401 GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime); 3402 __ ret(1 * kPointerSize); 3403 3404 __ bind(&runtime); 3405 // Handle number to string in the runtime system if not found in the cache. 3406 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); 3407 } 3408 3409 3410 static int NegativeComparisonResult(Condition cc) { 3411 ASSERT(cc != equal); 3412 ASSERT((cc == less) || (cc == less_equal) 3413 || (cc == greater) || (cc == greater_equal)); 3414 return (cc == greater || cc == greater_equal) ? LESS : GREATER; 3415 } 3416 3417 void CompareStub::Generate(MacroAssembler* masm) { 3418 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); 3419 3420 Label check_unequal_objects, done; 3421 3422 // Compare two smis if required. 3423 if (include_smi_compare_) { 3424 Label non_smi, smi_done; 3425 __ mov(ecx, Operand(edx)); 3426 __ or_(ecx, Operand(eax)); 3427 __ test(ecx, Immediate(kSmiTagMask)); 3428 __ j(not_zero, &non_smi, not_taken); 3429 __ sub(edx, Operand(eax)); // Return on the result of the subtraction. 3430 __ j(no_overflow, &smi_done); 3431 __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. 3432 __ bind(&smi_done); 3433 __ mov(eax, edx); 3434 __ ret(0); 3435 __ bind(&non_smi); 3436 } else if (FLAG_debug_code) { 3437 __ mov(ecx, Operand(edx)); 3438 __ or_(ecx, Operand(eax)); 3439 __ test(ecx, Immediate(kSmiTagMask)); 3440 __ Assert(not_zero, "Unexpected smi operands."); 3441 } 3442 3443 // NOTICE! This code is only reached after a smi-fast-case check, so 3444 // it is certain that at least one operand isn't a smi. 3445 3446 // Identical objects can be compared fast, but there are some tricky cases 3447 // for NaN and undefined. 3448 { 3449 Label not_identical; 3450 __ cmp(eax, Operand(edx)); 3451 __ j(not_equal, ¬_identical); 3452 3453 if (cc_ != equal) { 3454 // Check for undefined. undefined OP undefined is false even though 3455 // undefined == undefined. 3456 NearLabel check_for_nan; 3457 __ cmp(edx, masm->isolate()->factory()->undefined_value()); 3458 __ j(not_equal, &check_for_nan); 3459 __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); 3460 __ ret(0); 3461 __ bind(&check_for_nan); 3462 } 3463 3464 // Test for NaN. Sadly, we can't just compare to factory->nan_value(), 3465 // so we do the second best thing - test it ourselves. 3466 // Note: if cc_ != equal, never_nan_nan_ is not used. 3467 if (never_nan_nan_ && (cc_ == equal)) { 3468 __ Set(eax, Immediate(Smi::FromInt(EQUAL))); 3469 __ ret(0); 3470 } else { 3471 NearLabel heap_number; 3472 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), 3473 Immediate(masm->isolate()->factory()->heap_number_map())); 3474 __ j(equal, &heap_number); 3475 if (cc_ != equal) { 3476 // Call runtime on identical JSObjects. Otherwise return equal. 3477 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); 3478 __ j(above_equal, ¬_identical); 3479 } 3480 __ Set(eax, Immediate(Smi::FromInt(EQUAL))); 3481 __ ret(0); 3482 3483 __ bind(&heap_number); 3484 // It is a heap number, so return non-equal if it's NaN and equal if 3485 // it's not NaN. 3486 // The representation of NaN values has all exponent bits (52..62) set, 3487 // and not all mantissa bits (0..51) clear. 3488 // We only accept QNaNs, which have bit 51 set. 3489 // Read top bits of double representation (second word of value). 3490 3491 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., 3492 // all bits in the mask are set. We only need to check the word 3493 // that contains the exponent and high bit of the mantissa. 3494 STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); 3495 __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); 3496 __ Set(eax, Immediate(0)); 3497 // Shift value and mask so kQuietNaNHighBitsMask applies to topmost 3498 // bits. 3499 __ add(edx, Operand(edx)); 3500 __ cmp(edx, kQuietNaNHighBitsMask << 1); 3501 if (cc_ == equal) { 3502 STATIC_ASSERT(EQUAL != 1); 3503 __ setcc(above_equal, eax); 3504 __ ret(0); 3505 } else { 3506 NearLabel nan; 3507 __ j(above_equal, &nan); 3508 __ Set(eax, Immediate(Smi::FromInt(EQUAL))); 3509 __ ret(0); 3510 __ bind(&nan); 3511 __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); 3512 __ ret(0); 3513 } 3514 } 3515 3516 __ bind(¬_identical); 3517 } 3518 3519 // Strict equality can quickly decide whether objects are equal. 3520 // Non-strict object equality is slower, so it is handled later in the stub. 3521 if (cc_ == equal && strict_) { 3522 Label slow; // Fallthrough label. 3523 NearLabel not_smis; 3524 // If we're doing a strict equality comparison, we don't have to do 3525 // type conversion, so we generate code to do fast comparison for objects 3526 // and oddballs. Non-smi numbers and strings still go through the usual 3527 // slow-case code. 3528 // If either is a Smi (we know that not both are), then they can only 3529 // be equal if the other is a HeapNumber. If so, use the slow case. 3530 STATIC_ASSERT(kSmiTag == 0); 3531 ASSERT_EQ(0, Smi::FromInt(0)); 3532 __ mov(ecx, Immediate(kSmiTagMask)); 3533 __ and_(ecx, Operand(eax)); 3534 __ test(ecx, Operand(edx)); 3535 __ j(not_zero, ¬_smis); 3536 // One operand is a smi. 3537 3538 // Check whether the non-smi is a heap number. 3539 STATIC_ASSERT(kSmiTagMask == 1); 3540 // ecx still holds eax & kSmiTag, which is either zero or one. 3541 __ sub(Operand(ecx), Immediate(0x01)); 3542 __ mov(ebx, edx); 3543 __ xor_(ebx, Operand(eax)); 3544 __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx. 3545 __ xor_(ebx, Operand(eax)); 3546 // if eax was smi, ebx is now edx, else eax. 3547 3548 // Check if the non-smi operand is a heap number. 3549 __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), 3550 Immediate(masm->isolate()->factory()->heap_number_map())); 3551 // If heap number, handle it in the slow case. 3552 __ j(equal, &slow); 3553 // Return non-equal (ebx is not zero) 3554 __ mov(eax, ebx); 3555 __ ret(0); 3556 3557 __ bind(¬_smis); 3558 // If either operand is a JSObject or an oddball value, then they are not 3559 // equal since their pointers are different 3560 // There is no test for undetectability in strict equality. 3561 3562 // Get the type of the first operand. 3563 // If the first object is a JS object, we have done pointer comparison. 3564 NearLabel first_non_object; 3565 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); 3566 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); 3567 __ j(below, &first_non_object); 3568 3569 // Return non-zero (eax is not zero) 3570 NearLabel return_not_equal; 3571 STATIC_ASSERT(kHeapObjectTag != 0); 3572 __ bind(&return_not_equal); 3573 __ ret(0); 3574 3575 __ bind(&first_non_object); 3576 // Check for oddballs: true, false, null, undefined. 3577 __ CmpInstanceType(ecx, ODDBALL_TYPE); 3578 __ j(equal, &return_not_equal); 3579 3580 __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx); 3581 __ j(above_equal, &return_not_equal); 3582 3583 // Check for oddballs: true, false, null, undefined. 3584 __ CmpInstanceType(ecx, ODDBALL_TYPE); 3585 __ j(equal, &return_not_equal); 3586 3587 // Fall through to the general case. 3588 __ bind(&slow); 3589 } 3590 3591 // Generate the number comparison code. 3592 if (include_number_compare_) { 3593 Label non_number_comparison; 3594 Label unordered; 3595 if (CpuFeatures::IsSupported(SSE2)) { 3596 CpuFeatures::Scope use_sse2(SSE2); 3597 CpuFeatures::Scope use_cmov(CMOV); 3598 3599 FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); 3600 __ ucomisd(xmm0, xmm1); 3601 3602 // Don't base result on EFLAGS when a NaN is involved. 3603 __ j(parity_even, &unordered, not_taken); 3604 // Return a result of -1, 0, or 1, based on EFLAGS. 3605 __ mov(eax, 0); // equal 3606 __ mov(ecx, Immediate(Smi::FromInt(1))); 3607 __ cmov(above, eax, Operand(ecx)); 3608 __ mov(ecx, Immediate(Smi::FromInt(-1))); 3609 __ cmov(below, eax, Operand(ecx)); 3610 __ ret(0); 3611 } else { 3612 FloatingPointHelper::CheckFloatOperands( 3613 masm, &non_number_comparison, ebx); 3614 FloatingPointHelper::LoadFloatOperand(masm, eax); 3615 FloatingPointHelper::LoadFloatOperand(masm, edx); 3616 __ FCmp(); 3617 3618 // Don't base result on EFLAGS when a NaN is involved. 3619 __ j(parity_even, &unordered, not_taken); 3620 3621 NearLabel below_label, above_label; 3622 // Return a result of -1, 0, or 1, based on EFLAGS. 3623 __ j(below, &below_label, not_taken); 3624 __ j(above, &above_label, not_taken); 3625 3626 __ Set(eax, Immediate(0)); 3627 __ ret(0); 3628 3629 __ bind(&below_label); 3630 __ mov(eax, Immediate(Smi::FromInt(-1))); 3631 __ ret(0); 3632 3633 __ bind(&above_label); 3634 __ mov(eax, Immediate(Smi::FromInt(1))); 3635 __ ret(0); 3636 } 3637 3638 // If one of the numbers was NaN, then the result is always false. 3639 // The cc is never not-equal. 3640 __ bind(&unordered); 3641 ASSERT(cc_ != not_equal); 3642 if (cc_ == less || cc_ == less_equal) { 3643 __ mov(eax, Immediate(Smi::FromInt(1))); 3644 } else { 3645 __ mov(eax, Immediate(Smi::FromInt(-1))); 3646 } 3647 __ ret(0); 3648 3649 // The number comparison code did not provide a valid result. 3650 __ bind(&non_number_comparison); 3651 } 3652 3653 // Fast negative check for symbol-to-symbol equality. 3654 Label check_for_strings; 3655 if (cc_ == equal) { 3656 BranchIfNonSymbol(masm, &check_for_strings, eax, ecx); 3657 BranchIfNonSymbol(masm, &check_for_strings, edx, ecx); 3658 3659 // We've already checked for object identity, so if both operands 3660 // are symbols they aren't equal. Register eax already holds a 3661 // non-zero value, which indicates not equal, so just return. 3662 __ ret(0); 3663 } 3664 3665 __ bind(&check_for_strings); 3666 3667 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, 3668 &check_unequal_objects); 3669 3670 // Inline comparison of ascii strings. 3671 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, 3672 edx, 3673 eax, 3674 ecx, 3675 ebx, 3676 edi); 3677 #ifdef DEBUG 3678 __ Abort("Unexpected fall-through from string comparison"); 3679 #endif 3680 3681 __ bind(&check_unequal_objects); 3682 if (cc_ == equal && !strict_) { 3683 // Non-strict equality. Objects are unequal if 3684 // they are both JSObjects and not undetectable, 3685 // and their pointers are different. 3686 NearLabel not_both_objects; 3687 NearLabel return_unequal; 3688 // At most one is a smi, so we can test for smi by adding the two. 3689 // A smi plus a heap object has the low bit set, a heap object plus 3690 // a heap object has the low bit clear. 3691 STATIC_ASSERT(kSmiTag == 0); 3692 STATIC_ASSERT(kSmiTagMask == 1); 3693 __ lea(ecx, Operand(eax, edx, times_1, 0)); 3694 __ test(ecx, Immediate(kSmiTagMask)); 3695 __ j(not_zero, ¬_both_objects); 3696 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); 3697 __ j(below, ¬_both_objects); 3698 __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx); 3699 __ j(below, ¬_both_objects); 3700 // We do not bail out after this point. Both are JSObjects, and 3701 // they are equal if and only if both are undetectable. 3702 // The and of the undetectable flags is 1 if and only if they are equal. 3703 __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 3704 1 << Map::kIsUndetectable); 3705 __ j(zero, &return_unequal); 3706 __ test_b(FieldOperand(ebx, Map::kBitFieldOffset), 3707 1 << Map::kIsUndetectable); 3708 __ j(zero, &return_unequal); 3709 // The objects are both undetectable, so they both compare as the value 3710 // undefined, and are equal. 3711 __ Set(eax, Immediate(EQUAL)); 3712 __ bind(&return_unequal); 3713 // Return non-equal by returning the non-zero object pointer in eax, 3714 // or return equal if we fell through to here. 3715 __ ret(0); // rax, rdx were pushed 3716 __ bind(¬_both_objects); 3717 } 3718 3719 // Push arguments below the return address. 3720 __ pop(ecx); 3721 __ push(edx); 3722 __ push(eax); 3723 3724 // Figure out which native to call and setup the arguments. 3725 Builtins::JavaScript builtin; 3726 if (cc_ == equal) { 3727 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; 3728 } else { 3729 builtin = Builtins::COMPARE; 3730 __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); 3731 } 3732 3733 // Restore return address on the stack. 3734 __ push(ecx); 3735 3736 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 3737 // tagged as a small integer. 3738 __ InvokeBuiltin(builtin, JUMP_FUNCTION); 3739 } 3740 3741 3742 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, 3743 Label* label, 3744 Register object, 3745 Register scratch) { 3746 __ test(object, Immediate(kSmiTagMask)); 3747 __ j(zero, label); 3748 __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); 3749 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); 3750 __ and_(scratch, kIsSymbolMask | kIsNotStringMask); 3751 __ cmp(scratch, kSymbolTag | kStringTag); 3752 __ j(not_equal, label); 3753 } 3754 3755 3756 void StackCheckStub::Generate(MacroAssembler* masm) { 3757 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); 3758 } 3759 3760 3761 void CallFunctionStub::Generate(MacroAssembler* masm) { 3762 Label slow; 3763 3764 // If the receiver might be a value (string, number or boolean) check for this 3765 // and box it if it is. 3766 if (ReceiverMightBeValue()) { 3767 // Get the receiver from the stack. 3768 // +1 ~ return address 3769 Label receiver_is_value, receiver_is_js_object; 3770 __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); 3771 3772 // Check if receiver is a smi (which is a number value). 3773 __ test(eax, Immediate(kSmiTagMask)); 3774 __ j(zero, &receiver_is_value, not_taken); 3775 3776 // Check if the receiver is a valid JS object. 3777 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi); 3778 __ j(above_equal, &receiver_is_js_object); 3779 3780 // Call the runtime to box the value. 3781 __ bind(&receiver_is_value); 3782 __ EnterInternalFrame(); 3783 __ push(eax); 3784 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); 3785 __ LeaveInternalFrame(); 3786 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax); 3787 3788 __ bind(&receiver_is_js_object); 3789 } 3790 3791 // Get the function to call from the stack. 3792 // +2 ~ receiver, return address 3793 __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize)); 3794 3795 // Check that the function really is a JavaScript function. 3796 __ test(edi, Immediate(kSmiTagMask)); 3797 __ j(zero, &slow, not_taken); 3798 // Goto slow case if we do not have a function. 3799 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); 3800 __ j(not_equal, &slow, not_taken); 3801 3802 // Fast-case: Just invoke the function. 3803 ParameterCount actual(argc_); 3804 __ InvokeFunction(edi, actual, JUMP_FUNCTION); 3805 3806 // Slow-case: Non-function called. 3807 __ bind(&slow); 3808 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 3809 // of the original receiver from the call site). 3810 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi); 3811 __ Set(eax, Immediate(argc_)); 3812 __ Set(ebx, Immediate(0)); 3813 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); 3814 Handle<Code> adaptor = 3815 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); 3816 __ jmp(adaptor, RelocInfo::CODE_TARGET); 3817 } 3818 3819 3820 bool CEntryStub::NeedsImmovableCode() { 3821 return false; 3822 } 3823 3824 3825 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { 3826 __ Throw(eax); 3827 } 3828 3829 3830 void CEntryStub::GenerateCore(MacroAssembler* masm, 3831 Label* throw_normal_exception, 3832 Label* throw_termination_exception, 3833 Label* throw_out_of_memory_exception, 3834 bool do_gc, 3835 bool always_allocate_scope) { 3836 // eax: result parameter for PerformGC, if any 3837 // ebx: pointer to C function (C callee-saved) 3838 // ebp: frame pointer (restored after C call) 3839 // esp: stack pointer (restored after C call) 3840 // edi: number of arguments including receiver (C callee-saved) 3841 // esi: pointer to the first argument (C callee-saved) 3842 3843 // Result returned in eax, or eax+edx if result_size_ is 2. 3844 3845 // Check stack alignment. 3846 if (FLAG_debug_code) { 3847 __ CheckStackAlignment(); 3848 } 3849 3850 if (do_gc) { 3851 // Pass failure code returned from last attempt as first argument to 3852 // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the 3853 // stack alignment is known to be correct. This function takes one argument 3854 // which is passed on the stack, and we know that the stack has been 3855 // prepared to pass at least one argument. 3856 __ mov(Operand(esp, 0 * kPointerSize), eax); // Result. 3857 __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); 3858 } 3859 3860 ExternalReference scope_depth = 3861 ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); 3862 if (always_allocate_scope) { 3863 __ inc(Operand::StaticVariable(scope_depth)); 3864 } 3865 3866 // Call C function. 3867 __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. 3868 __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. 3869 __ mov(Operand(esp, 2 * kPointerSize), 3870 Immediate(ExternalReference::isolate_address())); 3871 __ call(Operand(ebx)); 3872 // Result is in eax or edx:eax - do not destroy these registers! 3873 3874 if (always_allocate_scope) { 3875 __ dec(Operand::StaticVariable(scope_depth)); 3876 } 3877 3878 // Make sure we're not trying to return 'the hole' from the runtime 3879 // call as this may lead to crashes in the IC code later. 3880 if (FLAG_debug_code) { 3881 NearLabel okay; 3882 __ cmp(eax, masm->isolate()->factory()->the_hole_value()); 3883 __ j(not_equal, &okay); 3884 __ int3(); 3885 __ bind(&okay); 3886 } 3887 3888 // Check for failure result. 3889 Label failure_returned; 3890 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); 3891 __ lea(ecx, Operand(eax, 1)); 3892 // Lower 2 bits of ecx are 0 iff eax has failure tag. 3893 __ test(ecx, Immediate(kFailureTagMask)); 3894 __ j(zero, &failure_returned, not_taken); 3895 3896 ExternalReference pending_exception_address( 3897 Isolate::k_pending_exception_address, masm->isolate()); 3898 3899 // Check that there is no pending exception, otherwise we 3900 // should have returned some failure value. 3901 if (FLAG_debug_code) { 3902 __ push(edx); 3903 __ mov(edx, Operand::StaticVariable( 3904 ExternalReference::the_hole_value_location(masm->isolate()))); 3905 NearLabel okay; 3906 __ cmp(edx, Operand::StaticVariable(pending_exception_address)); 3907 // Cannot use check here as it attempts to generate call into runtime. 3908 __ j(equal, &okay); 3909 __ int3(); 3910 __ bind(&okay); 3911 __ pop(edx); 3912 } 3913 3914 // Exit the JavaScript to C++ exit frame. 3915 __ LeaveExitFrame(save_doubles_); 3916 __ ret(0); 3917 3918 // Handling of failure. 3919 __ bind(&failure_returned); 3920 3921 Label retry; 3922 // If the returned exception is RETRY_AFTER_GC continue at retry label 3923 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); 3924 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); 3925 __ j(zero, &retry, taken); 3926 3927 // Special handling of out of memory exceptions. 3928 __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException())); 3929 __ j(equal, throw_out_of_memory_exception); 3930 3931 // Retrieve the pending exception and clear the variable. 3932 ExternalReference the_hole_location = 3933 ExternalReference::the_hole_value_location(masm->isolate()); 3934 __ mov(eax, Operand::StaticVariable(pending_exception_address)); 3935 __ mov(edx, Operand::StaticVariable(the_hole_location)); 3936 __ mov(Operand::StaticVariable(pending_exception_address), edx); 3937 3938 // Special handling of termination exceptions which are uncatchable 3939 // by javascript code. 3940 __ cmp(eax, masm->isolate()->factory()->termination_exception()); 3941 __ j(equal, throw_termination_exception); 3942 3943 // Handle normal exception. 3944 __ jmp(throw_normal_exception); 3945 3946 // Retry. 3947 __ bind(&retry); 3948 } 3949 3950 3951 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, 3952 UncatchableExceptionType type) { 3953 __ ThrowUncatchable(type, eax); 3954 } 3955 3956 3957 void CEntryStub::Generate(MacroAssembler* masm) { 3958 // eax: number of arguments including receiver 3959 // ebx: pointer to C function (C callee-saved) 3960 // ebp: frame pointer (restored after C call) 3961 // esp: stack pointer (restored after C call) 3962 // esi: current context (C callee-saved) 3963 // edi: JS function of the caller (C callee-saved) 3964 3965 // NOTE: Invocations of builtins may return failure objects instead 3966 // of a proper result. The builtin entry handles this by performing 3967 // a garbage collection and retrying the builtin (twice). 3968 3969 // Enter the exit frame that transitions from JavaScript to C++. 3970 __ EnterExitFrame(save_doubles_); 3971 3972 // eax: result parameter for PerformGC, if any (setup below) 3973 // ebx: pointer to builtin function (C callee-saved) 3974 // ebp: frame pointer (restored after C call) 3975 // esp: stack pointer (restored after C call) 3976 // edi: number of arguments including receiver (C callee-saved) 3977 // esi: argv pointer (C callee-saved) 3978 3979 Label throw_normal_exception; 3980 Label throw_termination_exception; 3981 Label throw_out_of_memory_exception; 3982 3983 // Call into the runtime system. 3984 GenerateCore(masm, 3985 &throw_normal_exception, 3986 &throw_termination_exception, 3987 &throw_out_of_memory_exception, 3988 false, 3989 false); 3990 3991 // Do space-specific GC and retry runtime call. 3992 GenerateCore(masm, 3993 &throw_normal_exception, 3994 &throw_termination_exception, 3995 &throw_out_of_memory_exception, 3996 true, 3997 false); 3998 3999 // Do full GC and retry runtime call one final time. 4000 Failure* failure = Failure::InternalError(); 4001 __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure))); 4002 GenerateCore(masm, 4003 &throw_normal_exception, 4004 &throw_termination_exception, 4005 &throw_out_of_memory_exception, 4006 true, 4007 true); 4008 4009 __ bind(&throw_out_of_memory_exception); 4010 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); 4011 4012 __ bind(&throw_termination_exception); 4013 GenerateThrowUncatchable(masm, TERMINATION); 4014 4015 __ bind(&throw_normal_exception); 4016 GenerateThrowTOS(masm); 4017 } 4018 4019 4020 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { 4021 Label invoke, exit; 4022 #ifdef ENABLE_LOGGING_AND_PROFILING 4023 Label not_outermost_js, not_outermost_js_2; 4024 #endif 4025 4026 // Setup frame. 4027 __ push(ebp); 4028 __ mov(ebp, Operand(esp)); 4029 4030 // Push marker in two places. 4031 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; 4032 __ push(Immediate(Smi::FromInt(marker))); // context slot 4033 __ push(Immediate(Smi::FromInt(marker))); // function slot 4034 // Save callee-saved registers (C calling conventions). 4035 __ push(edi); 4036 __ push(esi); 4037 __ push(ebx); 4038 4039 // Save copies of the top frame descriptor on the stack. 4040 ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate()); 4041 __ push(Operand::StaticVariable(c_entry_fp)); 4042 4043 #ifdef ENABLE_LOGGING_AND_PROFILING 4044 // If this is the outermost JS call, set js_entry_sp value. 4045 ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, 4046 masm->isolate()); 4047 __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0)); 4048 __ j(not_equal, ¬_outermost_js); 4049 __ mov(Operand::StaticVariable(js_entry_sp), ebp); 4050 __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); 4051 Label cont; 4052 __ jmp(&cont); 4053 __ bind(¬_outermost_js); 4054 __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); 4055 __ bind(&cont); 4056 #endif 4057 4058 // Call a faked try-block that does the invoke. 4059 __ call(&invoke); 4060 4061 // Caught exception: Store result (exception) in the pending 4062 // exception field in the JSEnv and return a failure sentinel. 4063 ExternalReference pending_exception(Isolate::k_pending_exception_address, 4064 masm->isolate()); 4065 __ mov(Operand::StaticVariable(pending_exception), eax); 4066 __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception())); 4067 __ jmp(&exit); 4068 4069 // Invoke: Link this frame into the handler chain. 4070 __ bind(&invoke); 4071 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); 4072 4073 // Clear any pending exceptions. 4074 ExternalReference the_hole_location = 4075 ExternalReference::the_hole_value_location(masm->isolate()); 4076 __ mov(edx, Operand::StaticVariable(the_hole_location)); 4077 __ mov(Operand::StaticVariable(pending_exception), edx); 4078 4079 // Fake a receiver (NULL). 4080 __ push(Immediate(0)); // receiver 4081 4082 // Invoke the function by calling through JS entry trampoline 4083 // builtin and pop the faked function when we return. Notice that we 4084 // cannot store a reference to the trampoline code directly in this 4085 // stub, because the builtin stubs may not have been generated yet. 4086 if (is_construct) { 4087 ExternalReference construct_entry( 4088 Builtins::kJSConstructEntryTrampoline, 4089 masm->isolate()); 4090 __ mov(edx, Immediate(construct_entry)); 4091 } else { 4092 ExternalReference entry(Builtins::kJSEntryTrampoline, 4093 masm->isolate()); 4094 __ mov(edx, Immediate(entry)); 4095 } 4096 __ mov(edx, Operand(edx, 0)); // deref address 4097 __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); 4098 __ call(Operand(edx)); 4099 4100 // Unlink this frame from the handler chain. 4101 __ PopTryHandler(); 4102 4103 __ bind(&exit); 4104 #ifdef ENABLE_LOGGING_AND_PROFILING 4105 // Check if the current stack frame is marked as the outermost JS frame. 4106 __ pop(ebx); 4107 __ cmp(Operand(ebx), 4108 Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); 4109 __ j(not_equal, ¬_outermost_js_2); 4110 __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); 4111 __ bind(¬_outermost_js_2); 4112 #endif 4113 4114 // Restore the top frame descriptor from the stack. 4115 __ pop(Operand::StaticVariable(ExternalReference( 4116 Isolate::k_c_entry_fp_address, 4117 masm->isolate()))); 4118 4119 // Restore callee-saved registers (C calling conventions). 4120 __ pop(ebx); 4121 __ pop(esi); 4122 __ pop(edi); 4123 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers 4124 4125 // Restore frame pointer and return. 4126 __ pop(ebp); 4127 __ ret(0); 4128 } 4129 4130 4131 // Generate stub code for instanceof. 4132 // This code can patch a call site inlined cache of the instance of check, 4133 // which looks like this. 4134 // 4135 // 81 ff XX XX XX XX cmp edi, <the hole, patched to a map> 4136 // 75 0a jne <some near label> 4137 // b8 XX XX XX XX mov eax, <the hole, patched to either true or false> 4138 // 4139 // If call site patching is requested the stack will have the delta from the 4140 // return address to the cmp instruction just below the return address. This 4141 // also means that call site patching can only take place with arguments in 4142 // registers. TOS looks like this when call site patching is requested 4143 // 4144 // esp[0] : return address 4145 // esp[4] : delta from return address to cmp instruction 4146 // 4147 void InstanceofStub::Generate(MacroAssembler* masm) { 4148 // Call site inlining and patching implies arguments in registers. 4149 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); 4150 4151 // Fixed register usage throughout the stub. 4152 Register object = eax; // Object (lhs). 4153 Register map = ebx; // Map of the object. 4154 Register function = edx; // Function (rhs). 4155 Register prototype = edi; // Prototype of the function. 4156 Register scratch = ecx; 4157 4158 // Constants describing the call site code to patch. 4159 static const int kDeltaToCmpImmediate = 2; 4160 static const int kDeltaToMov = 8; 4161 static const int kDeltaToMovImmediate = 9; 4162 static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81); 4163 static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff); 4164 static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8); 4165 4166 ExternalReference roots_address = 4167 ExternalReference::roots_address(masm->isolate()); 4168 4169 ASSERT_EQ(object.code(), InstanceofStub::left().code()); 4170 ASSERT_EQ(function.code(), InstanceofStub::right().code()); 4171 4172 // Get the object and function - they are always both needed. 4173 Label slow, not_js_object; 4174 if (!HasArgsInRegisters()) { 4175 __ mov(object, Operand(esp, 2 * kPointerSize)); 4176 __ mov(function, Operand(esp, 1 * kPointerSize)); 4177 } 4178 4179 // Check that the left hand is a JS object. 4180 __ test(object, Immediate(kSmiTagMask)); 4181 __ j(zero, ¬_js_object, not_taken); 4182 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); 4183 4184 // If there is a call site cache don't look in the global cache, but do the 4185 // real lookup and update the call site cache. 4186 if (!HasCallSiteInlineCheck()) { 4187 // Look up the function and the map in the instanceof cache. 4188 NearLabel miss; 4189 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); 4190 __ cmp(function, 4191 Operand::StaticArray(scratch, times_pointer_size, roots_address)); 4192 __ j(not_equal, &miss); 4193 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex)); 4194 __ cmp(map, Operand::StaticArray( 4195 scratch, times_pointer_size, roots_address)); 4196 __ j(not_equal, &miss); 4197 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 4198 __ mov(eax, Operand::StaticArray( 4199 scratch, times_pointer_size, roots_address)); 4200 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4201 __ bind(&miss); 4202 } 4203 4204 // Get the prototype of the function. 4205 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); 4206 4207 // Check that the function prototype is a JS object. 4208 __ test(prototype, Immediate(kSmiTagMask)); 4209 __ j(zero, &slow, not_taken); 4210 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); 4211 4212 // Update the global instanceof or call site inlined cache with the current 4213 // map and function. The cached answer will be set when it is known below. 4214 if (!HasCallSiteInlineCheck()) { 4215 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex)); 4216 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map); 4217 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); 4218 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), 4219 function); 4220 } else { 4221 // The constants for the code patching are based on no push instructions 4222 // at the call site. 4223 ASSERT(HasArgsInRegisters()); 4224 // Get return address and delta to inlined map check. 4225 __ mov(scratch, Operand(esp, 0 * kPointerSize)); 4226 __ sub(scratch, Operand(esp, 1 * kPointerSize)); 4227 if (FLAG_debug_code) { 4228 __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1); 4229 __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)"); 4230 __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2); 4231 __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)"); 4232 } 4233 __ mov(Operand(scratch, kDeltaToCmpImmediate), map); 4234 } 4235 4236 // Loop through the prototype chain of the object looking for the function 4237 // prototype. 4238 __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset)); 4239 NearLabel loop, is_instance, is_not_instance; 4240 __ bind(&loop); 4241 __ cmp(scratch, Operand(prototype)); 4242 __ j(equal, &is_instance); 4243 Factory* factory = masm->isolate()->factory(); 4244 __ cmp(Operand(scratch), Immediate(factory->null_value())); 4245 __ j(equal, &is_not_instance); 4246 __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); 4247 __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); 4248 __ jmp(&loop); 4249 4250 __ bind(&is_instance); 4251 if (!HasCallSiteInlineCheck()) { 4252 __ Set(eax, Immediate(0)); 4253 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 4254 __ mov(Operand::StaticArray(scratch, 4255 times_pointer_size, roots_address), eax); 4256 } else { 4257 // Get return address and delta to inlined map check. 4258 __ mov(eax, factory->true_value()); 4259 __ mov(scratch, Operand(esp, 0 * kPointerSize)); 4260 __ sub(scratch, Operand(esp, 1 * kPointerSize)); 4261 if (FLAG_debug_code) { 4262 __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte); 4263 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)"); 4264 } 4265 __ mov(Operand(scratch, kDeltaToMovImmediate), eax); 4266 if (!ReturnTrueFalseObject()) { 4267 __ Set(eax, Immediate(0)); 4268 } 4269 } 4270 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4271 4272 __ bind(&is_not_instance); 4273 if (!HasCallSiteInlineCheck()) { 4274 __ Set(eax, Immediate(Smi::FromInt(1))); 4275 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); 4276 __ mov(Operand::StaticArray( 4277 scratch, times_pointer_size, roots_address), eax); 4278 } else { 4279 // Get return address and delta to inlined map check. 4280 __ mov(eax, factory->false_value()); 4281 __ mov(scratch, Operand(esp, 0 * kPointerSize)); 4282 __ sub(scratch, Operand(esp, 1 * kPointerSize)); 4283 if (FLAG_debug_code) { 4284 __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte); 4285 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)"); 4286 } 4287 __ mov(Operand(scratch, kDeltaToMovImmediate), eax); 4288 if (!ReturnTrueFalseObject()) { 4289 __ Set(eax, Immediate(Smi::FromInt(1))); 4290 } 4291 } 4292 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4293 4294 Label object_not_null, object_not_null_or_smi; 4295 __ bind(¬_js_object); 4296 // Before null, smi and string value checks, check that the rhs is a function 4297 // as for a non-function rhs an exception needs to be thrown. 4298 __ test(function, Immediate(kSmiTagMask)); 4299 __ j(zero, &slow, not_taken); 4300 __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch); 4301 __ j(not_equal, &slow, not_taken); 4302 4303 // Null is not instance of anything. 4304 __ cmp(object, factory->null_value()); 4305 __ j(not_equal, &object_not_null); 4306 __ Set(eax, Immediate(Smi::FromInt(1))); 4307 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4308 4309 __ bind(&object_not_null); 4310 // Smi values is not instance of anything. 4311 __ test(object, Immediate(kSmiTagMask)); 4312 __ j(not_zero, &object_not_null_or_smi, not_taken); 4313 __ Set(eax, Immediate(Smi::FromInt(1))); 4314 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4315 4316 __ bind(&object_not_null_or_smi); 4317 // String values is not instance of anything. 4318 Condition is_string = masm->IsObjectStringType(object, scratch, scratch); 4319 __ j(NegateCondition(is_string), &slow); 4320 __ Set(eax, Immediate(Smi::FromInt(1))); 4321 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4322 4323 // Slow-case: Go through the JavaScript implementation. 4324 __ bind(&slow); 4325 if (!ReturnTrueFalseObject()) { 4326 // Tail call the builtin which returns 0 or 1. 4327 if (HasArgsInRegisters()) { 4328 // Push arguments below return address. 4329 __ pop(scratch); 4330 __ push(object); 4331 __ push(function); 4332 __ push(scratch); 4333 } 4334 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 4335 } else { 4336 // Call the builtin and convert 0/1 to true/false. 4337 __ EnterInternalFrame(); 4338 __ push(object); 4339 __ push(function); 4340 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); 4341 __ LeaveInternalFrame(); 4342 NearLabel true_value, done; 4343 __ test(eax, Operand(eax)); 4344 __ j(zero, &true_value); 4345 __ mov(eax, factory->false_value()); 4346 __ jmp(&done); 4347 __ bind(&true_value); 4348 __ mov(eax, factory->true_value()); 4349 __ bind(&done); 4350 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize); 4351 } 4352 } 4353 4354 4355 Register InstanceofStub::left() { return eax; } 4356 4357 4358 Register InstanceofStub::right() { return edx; } 4359 4360 4361 int CompareStub::MinorKey() { 4362 // Encode the three parameters in a unique 16 bit value. To avoid duplicate 4363 // stubs the never NaN NaN condition is only taken into account if the 4364 // condition is equals. 4365 ASSERT(static_cast<unsigned>(cc_) < (1 << 12)); 4366 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); 4367 return ConditionField::encode(static_cast<unsigned>(cc_)) 4368 | RegisterField::encode(false) // lhs_ and rhs_ are not used 4369 | StrictField::encode(strict_) 4370 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) 4371 | IncludeNumberCompareField::encode(include_number_compare_) 4372 | IncludeSmiCompareField::encode(include_smi_compare_); 4373 } 4374 4375 4376 // Unfortunately you have to run without snapshots to see most of these 4377 // names in the profile since most compare stubs end up in the snapshot. 4378 const char* CompareStub::GetName() { 4379 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); 4380 4381 if (name_ != NULL) return name_; 4382 const int kMaxNameLength = 100; 4383 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( 4384 kMaxNameLength); 4385 if (name_ == NULL) return "OOM"; 4386 4387 const char* cc_name; 4388 switch (cc_) { 4389 case less: cc_name = "LT"; break; 4390 case greater: cc_name = "GT"; break; 4391 case less_equal: cc_name = "LE"; break; 4392 case greater_equal: cc_name = "GE"; break; 4393 case equal: cc_name = "EQ"; break; 4394 case not_equal: cc_name = "NE"; break; 4395 default: cc_name = "UnknownCondition"; break; 4396 } 4397 4398 const char* strict_name = ""; 4399 if (strict_ && (cc_ == equal || cc_ == not_equal)) { 4400 strict_name = "_STRICT"; 4401 } 4402 4403 const char* never_nan_nan_name = ""; 4404 if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { 4405 never_nan_nan_name = "_NO_NAN"; 4406 } 4407 4408 const char* include_number_compare_name = ""; 4409 if (!include_number_compare_) { 4410 include_number_compare_name = "_NO_NUMBER"; 4411 } 4412 4413 const char* include_smi_compare_name = ""; 4414 if (!include_smi_compare_) { 4415 include_smi_compare_name = "_NO_SMI"; 4416 } 4417 4418 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), 4419 "CompareStub_%s%s%s%s%s", 4420 cc_name, 4421 strict_name, 4422 never_nan_nan_name, 4423 include_number_compare_name, 4424 include_smi_compare_name); 4425 return name_; 4426 } 4427 4428 4429 // ------------------------------------------------------------------------- 4430 // StringCharCodeAtGenerator 4431 4432 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { 4433 Label flat_string; 4434 Label ascii_string; 4435 Label got_char_code; 4436 4437 // If the receiver is a smi trigger the non-string case. 4438 STATIC_ASSERT(kSmiTag == 0); 4439 __ test(object_, Immediate(kSmiTagMask)); 4440 __ j(zero, receiver_not_string_); 4441 4442 // Fetch the instance type of the receiver into result register. 4443 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); 4444 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); 4445 // If the receiver is not a string trigger the non-string case. 4446 __ test(result_, Immediate(kIsNotStringMask)); 4447 __ j(not_zero, receiver_not_string_); 4448 4449 // If the index is non-smi trigger the non-smi case. 4450 STATIC_ASSERT(kSmiTag == 0); 4451 __ test(index_, Immediate(kSmiTagMask)); 4452 __ j(not_zero, &index_not_smi_); 4453 4454 // Put smi-tagged index into scratch register. 4455 __ mov(scratch_, index_); 4456 __ bind(&got_smi_index_); 4457 4458 // Check for index out of range. 4459 __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset)); 4460 __ j(above_equal, index_out_of_range_); 4461 4462 // We need special handling for non-flat strings. 4463 STATIC_ASSERT(kSeqStringTag == 0); 4464 __ test(result_, Immediate(kStringRepresentationMask)); 4465 __ j(zero, &flat_string); 4466 4467 // Handle non-flat strings. 4468 __ test(result_, Immediate(kIsConsStringMask)); 4469 __ j(zero, &call_runtime_); 4470 4471 // ConsString. 4472 // Check whether the right hand side is the empty string (i.e. if 4473 // this is really a flat string in a cons string). If that is not 4474 // the case we would rather go to the runtime system now to flatten 4475 // the string. 4476 __ cmp(FieldOperand(object_, ConsString::kSecondOffset), 4477 Immediate(masm->isolate()->factory()->empty_string())); 4478 __ j(not_equal, &call_runtime_); 4479 // Get the first of the two strings and load its instance type. 4480 __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); 4481 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); 4482 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); 4483 // If the first cons component is also non-flat, then go to runtime. 4484 STATIC_ASSERT(kSeqStringTag == 0); 4485 __ test(result_, Immediate(kStringRepresentationMask)); 4486 __ j(not_zero, &call_runtime_); 4487 4488 // Check for 1-byte or 2-byte string. 4489 __ bind(&flat_string); 4490 STATIC_ASSERT(kAsciiStringTag != 0); 4491 __ test(result_, Immediate(kStringEncodingMask)); 4492 __ j(not_zero, &ascii_string); 4493 4494 // 2-byte string. 4495 // Load the 2-byte character code into the result register. 4496 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 4497 __ movzx_w(result_, FieldOperand(object_, 4498 scratch_, times_1, // Scratch is smi-tagged. 4499 SeqTwoByteString::kHeaderSize)); 4500 __ jmp(&got_char_code); 4501 4502 // ASCII string. 4503 // Load the byte into the result register. 4504 __ bind(&ascii_string); 4505 __ SmiUntag(scratch_); 4506 __ movzx_b(result_, FieldOperand(object_, 4507 scratch_, times_1, 4508 SeqAsciiString::kHeaderSize)); 4509 __ bind(&got_char_code); 4510 __ SmiTag(result_); 4511 __ bind(&exit_); 4512 } 4513 4514 4515 void StringCharCodeAtGenerator::GenerateSlow( 4516 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { 4517 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); 4518 4519 // Index is not a smi. 4520 __ bind(&index_not_smi_); 4521 // If index is a heap number, try converting it to an integer. 4522 __ CheckMap(index_, 4523 masm->isolate()->factory()->heap_number_map(), 4524 index_not_number_, 4525 true); 4526 call_helper.BeforeCall(masm); 4527 __ push(object_); 4528 __ push(index_); 4529 __ push(index_); // Consumed by runtime conversion function. 4530 if (index_flags_ == STRING_INDEX_IS_NUMBER) { 4531 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); 4532 } else { 4533 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); 4534 // NumberToSmi discards numbers that are not exact integers. 4535 __ CallRuntime(Runtime::kNumberToSmi, 1); 4536 } 4537 if (!scratch_.is(eax)) { 4538 // Save the conversion result before the pop instructions below 4539 // have a chance to overwrite it. 4540 __ mov(scratch_, eax); 4541 } 4542 __ pop(index_); 4543 __ pop(object_); 4544 // Reload the instance type. 4545 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); 4546 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); 4547 call_helper.AfterCall(masm); 4548 // If index is still not a smi, it must be out of range. 4549 STATIC_ASSERT(kSmiTag == 0); 4550 __ test(scratch_, Immediate(kSmiTagMask)); 4551 __ j(not_zero, index_out_of_range_); 4552 // Otherwise, return to the fast path. 4553 __ jmp(&got_smi_index_); 4554 4555 // Call runtime. We get here when the receiver is a string and the 4556 // index is a number, but the code of getting the actual character 4557 // is too complex (e.g., when the string needs to be flattened). 4558 __ bind(&call_runtime_); 4559 call_helper.BeforeCall(masm); 4560 __ push(object_); 4561 __ push(index_); 4562 __ CallRuntime(Runtime::kStringCharCodeAt, 2); 4563 if (!result_.is(eax)) { 4564 __ mov(result_, eax); 4565 } 4566 call_helper.AfterCall(masm); 4567 __ jmp(&exit_); 4568 4569 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); 4570 } 4571 4572 4573 // ------------------------------------------------------------------------- 4574 // StringCharFromCodeGenerator 4575 4576 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { 4577 // Fast case of Heap::LookupSingleCharacterStringFromCode. 4578 STATIC_ASSERT(kSmiTag == 0); 4579 STATIC_ASSERT(kSmiShiftSize == 0); 4580 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); 4581 __ test(code_, 4582 Immediate(kSmiTagMask | 4583 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); 4584 __ j(not_zero, &slow_case_, not_taken); 4585 4586 Factory* factory = masm->isolate()->factory(); 4587 __ Set(result_, Immediate(factory->single_character_string_cache())); 4588 STATIC_ASSERT(kSmiTag == 0); 4589 STATIC_ASSERT(kSmiTagSize == 1); 4590 STATIC_ASSERT(kSmiShiftSize == 0); 4591 // At this point code register contains smi tagged ascii char code. 4592 __ mov(result_, FieldOperand(result_, 4593 code_, times_half_pointer_size, 4594 FixedArray::kHeaderSize)); 4595 __ cmp(result_, factory->undefined_value()); 4596 __ j(equal, &slow_case_, not_taken); 4597 __ bind(&exit_); 4598 } 4599 4600 4601 void StringCharFromCodeGenerator::GenerateSlow( 4602 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { 4603 __ Abort("Unexpected fallthrough to CharFromCode slow case"); 4604 4605 __ bind(&slow_case_); 4606 call_helper.BeforeCall(masm); 4607 __ push(code_); 4608 __ CallRuntime(Runtime::kCharFromCode, 1); 4609 if (!result_.is(eax)) { 4610 __ mov(result_, eax); 4611 } 4612 call_helper.AfterCall(masm); 4613 __ jmp(&exit_); 4614 4615 __ Abort("Unexpected fallthrough from CharFromCode slow case"); 4616 } 4617 4618 4619 // ------------------------------------------------------------------------- 4620 // StringCharAtGenerator 4621 4622 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { 4623 char_code_at_generator_.GenerateFast(masm); 4624 char_from_code_generator_.GenerateFast(masm); 4625 } 4626 4627 4628 void StringCharAtGenerator::GenerateSlow( 4629 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { 4630 char_code_at_generator_.GenerateSlow(masm, call_helper); 4631 char_from_code_generator_.GenerateSlow(masm, call_helper); 4632 } 4633 4634 4635 void StringAddStub::Generate(MacroAssembler* masm) { 4636 Label string_add_runtime, call_builtin; 4637 Builtins::JavaScript builtin_id = Builtins::ADD; 4638 4639 // Load the two arguments. 4640 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. 4641 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. 4642 4643 // Make sure that both arguments are strings if not known in advance. 4644 if (flags_ == NO_STRING_ADD_FLAGS) { 4645 __ test(eax, Immediate(kSmiTagMask)); 4646 __ j(zero, &string_add_runtime); 4647 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx); 4648 __ j(above_equal, &string_add_runtime); 4649 4650 // First argument is a a string, test second. 4651 __ test(edx, Immediate(kSmiTagMask)); 4652 __ j(zero, &string_add_runtime); 4653 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx); 4654 __ j(above_equal, &string_add_runtime); 4655 } else { 4656 // Here at least one of the arguments is definitely a string. 4657 // We convert the one that is not known to be a string. 4658 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { 4659 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); 4660 GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi, 4661 &call_builtin); 4662 builtin_id = Builtins::STRING_ADD_RIGHT; 4663 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { 4664 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); 4665 GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi, 4666 &call_builtin); 4667 builtin_id = Builtins::STRING_ADD_LEFT; 4668 } 4669 } 4670 4671 // Both arguments are strings. 4672 // eax: first string 4673 // edx: second string 4674 // Check if either of the strings are empty. In that case return the other. 4675 NearLabel second_not_zero_length, both_not_zero_length; 4676 __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); 4677 STATIC_ASSERT(kSmiTag == 0); 4678 __ test(ecx, Operand(ecx)); 4679 __ j(not_zero, &second_not_zero_length); 4680 // Second string is empty, result is first string which is already in eax. 4681 Counters* counters = masm->isolate()->counters(); 4682 __ IncrementCounter(counters->string_add_native(), 1); 4683 __ ret(2 * kPointerSize); 4684 __ bind(&second_not_zero_length); 4685 __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); 4686 STATIC_ASSERT(kSmiTag == 0); 4687 __ test(ebx, Operand(ebx)); 4688 __ j(not_zero, &both_not_zero_length); 4689 // First string is empty, result is second string which is in edx. 4690 __ mov(eax, edx); 4691 __ IncrementCounter(counters->string_add_native(), 1); 4692 __ ret(2 * kPointerSize); 4693 4694 // Both strings are non-empty. 4695 // eax: first string 4696 // ebx: length of first string as a smi 4697 // ecx: length of second string as a smi 4698 // edx: second string 4699 // Look at the length of the result of adding the two strings. 4700 Label string_add_flat_result, longer_than_two; 4701 __ bind(&both_not_zero_length); 4702 __ add(ebx, Operand(ecx)); 4703 STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); 4704 // Handle exceptionally long strings in the runtime system. 4705 __ j(overflow, &string_add_runtime); 4706 // Use the symbol table when adding two one character strings, as it 4707 // helps later optimizations to return a symbol here. 4708 __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); 4709 __ j(not_equal, &longer_than_two); 4710 4711 // Check that both strings are non-external ascii strings. 4712 __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, 4713 &string_add_runtime); 4714 4715 // Get the two characters forming the new string. 4716 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); 4717 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); 4718 4719 // Try to lookup two character string in symbol table. If it is not found 4720 // just allocate a new one. 4721 Label make_two_character_string, make_two_character_string_no_reload; 4722 StringHelper::GenerateTwoCharacterSymbolTableProbe( 4723 masm, ebx, ecx, eax, edx, edi, 4724 &make_two_character_string_no_reload, &make_two_character_string); 4725 __ IncrementCounter(counters->string_add_native(), 1); 4726 __ ret(2 * kPointerSize); 4727 4728 // Allocate a two character string. 4729 __ bind(&make_two_character_string); 4730 // Reload the arguments. 4731 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. 4732 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. 4733 // Get the two characters forming the new string. 4734 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); 4735 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); 4736 __ bind(&make_two_character_string_no_reload); 4737 __ IncrementCounter(counters->string_add_make_two_char(), 1); 4738 __ AllocateAsciiString(eax, // Result. 4739 2, // Length. 4740 edi, // Scratch 1. 4741 edx, // Scratch 2. 4742 &string_add_runtime); 4743 // Pack both characters in ebx. 4744 __ shl(ecx, kBitsPerByte); 4745 __ or_(ebx, Operand(ecx)); 4746 // Set the characters in the new string. 4747 __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); 4748 __ IncrementCounter(counters->string_add_native(), 1); 4749 __ ret(2 * kPointerSize); 4750 4751 __ bind(&longer_than_two); 4752 // Check if resulting string will be flat. 4753 __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); 4754 __ j(below, &string_add_flat_result); 4755 4756 // If result is not supposed to be flat allocate a cons string object. If both 4757 // strings are ascii the result is an ascii cons string. 4758 Label non_ascii, allocated, ascii_data; 4759 __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset)); 4760 __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); 4761 __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); 4762 __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); 4763 __ and_(ecx, Operand(edi)); 4764 STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); 4765 __ test(ecx, Immediate(kAsciiStringTag)); 4766 __ j(zero, &non_ascii); 4767 __ bind(&ascii_data); 4768 // Allocate an acsii cons string. 4769 __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime); 4770 __ bind(&allocated); 4771 // Fill the fields of the cons string. 4772 if (FLAG_debug_code) __ AbortIfNotSmi(ebx); 4773 __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx); 4774 __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset), 4775 Immediate(String::kEmptyHashField)); 4776 __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax); 4777 __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx); 4778 __ mov(eax, ecx); 4779 __ IncrementCounter(counters->string_add_native(), 1); 4780 __ ret(2 * kPointerSize); 4781 __ bind(&non_ascii); 4782 // At least one of the strings is two-byte. Check whether it happens 4783 // to contain only ascii characters. 4784 // ecx: first instance type AND second instance type. 4785 // edi: second instance type. 4786 __ test(ecx, Immediate(kAsciiDataHintMask)); 4787 __ j(not_zero, &ascii_data); 4788 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); 4789 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); 4790 __ xor_(edi, Operand(ecx)); 4791 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); 4792 __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); 4793 __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); 4794 __ j(equal, &ascii_data); 4795 // Allocate a two byte cons string. 4796 __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime); 4797 __ jmp(&allocated); 4798 4799 // Handle creating a flat result. First check that both strings are not 4800 // external strings. 4801 // eax: first string 4802 // ebx: length of resulting flat string as a smi 4803 // edx: second string 4804 __ bind(&string_add_flat_result); 4805 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); 4806 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); 4807 __ and_(ecx, kStringRepresentationMask); 4808 __ cmp(ecx, kExternalStringTag); 4809 __ j(equal, &string_add_runtime); 4810 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); 4811 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); 4812 __ and_(ecx, kStringRepresentationMask); 4813 __ cmp(ecx, kExternalStringTag); 4814 __ j(equal, &string_add_runtime); 4815 // Now check if both strings are ascii strings. 4816 // eax: first string 4817 // ebx: length of resulting flat string as a smi 4818 // edx: second string 4819 Label non_ascii_string_add_flat_result; 4820 STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); 4821 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); 4822 __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); 4823 __ j(zero, &non_ascii_string_add_flat_result); 4824 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); 4825 __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); 4826 __ j(zero, &string_add_runtime); 4827 4828 // Both strings are ascii strings. As they are short they are both flat. 4829 // ebx: length of resulting flat string as a smi 4830 __ SmiUntag(ebx); 4831 __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime); 4832 // eax: result string 4833 __ mov(ecx, eax); 4834 // Locate first character of result. 4835 __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 4836 // Load first argument and locate first character. 4837 __ mov(edx, Operand(esp, 2 * kPointerSize)); 4838 __ mov(edi, FieldOperand(edx, String::kLengthOffset)); 4839 __ SmiUntag(edi); 4840 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 4841 // eax: result string 4842 // ecx: first character of result 4843 // edx: first char of first argument 4844 // edi: length of first argument 4845 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); 4846 // Load second argument and locate first character. 4847 __ mov(edx, Operand(esp, 1 * kPointerSize)); 4848 __ mov(edi, FieldOperand(edx, String::kLengthOffset)); 4849 __ SmiUntag(edi); 4850 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 4851 // eax: result string 4852 // ecx: next character of result 4853 // edx: first char of second argument 4854 // edi: length of second argument 4855 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); 4856 __ IncrementCounter(counters->string_add_native(), 1); 4857 __ ret(2 * kPointerSize); 4858 4859 // Handle creating a flat two byte result. 4860 // eax: first string - known to be two byte 4861 // ebx: length of resulting flat string as a smi 4862 // edx: second string 4863 __ bind(&non_ascii_string_add_flat_result); 4864 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); 4865 __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); 4866 __ j(not_zero, &string_add_runtime); 4867 // Both strings are two byte strings. As they are short they are both 4868 // flat. 4869 __ SmiUntag(ebx); 4870 __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime); 4871 // eax: result string 4872 __ mov(ecx, eax); 4873 // Locate first character of result. 4874 __ add(Operand(ecx), 4875 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 4876 // Load first argument and locate first character. 4877 __ mov(edx, Operand(esp, 2 * kPointerSize)); 4878 __ mov(edi, FieldOperand(edx, String::kLengthOffset)); 4879 __ SmiUntag(edi); 4880 __ add(Operand(edx), 4881 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 4882 // eax: result string 4883 // ecx: first character of result 4884 // edx: first char of first argument 4885 // edi: length of first argument 4886 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); 4887 // Load second argument and locate first character. 4888 __ mov(edx, Operand(esp, 1 * kPointerSize)); 4889 __ mov(edi, FieldOperand(edx, String::kLengthOffset)); 4890 __ SmiUntag(edi); 4891 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 4892 // eax: result string 4893 // ecx: next character of result 4894 // edx: first char of second argument 4895 // edi: length of second argument 4896 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); 4897 __ IncrementCounter(counters->string_add_native(), 1); 4898 __ ret(2 * kPointerSize); 4899 4900 // Just jump to runtime to add the two strings. 4901 __ bind(&string_add_runtime); 4902 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); 4903 4904 if (call_builtin.is_linked()) { 4905 __ bind(&call_builtin); 4906 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); 4907 } 4908 } 4909 4910 4911 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, 4912 int stack_offset, 4913 Register arg, 4914 Register scratch1, 4915 Register scratch2, 4916 Register scratch3, 4917 Label* slow) { 4918 // First check if the argument is already a string. 4919 Label not_string, done; 4920 __ test(arg, Immediate(kSmiTagMask)); 4921 __ j(zero, ¬_string); 4922 __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1); 4923 __ j(below, &done); 4924 4925 // Check the number to string cache. 4926 Label not_cached; 4927 __ bind(¬_string); 4928 // Puts the cached result into scratch1. 4929 NumberToStringStub::GenerateLookupNumberStringCache(masm, 4930 arg, 4931 scratch1, 4932 scratch2, 4933 scratch3, 4934 false, 4935 ¬_cached); 4936 __ mov(arg, scratch1); 4937 __ mov(Operand(esp, stack_offset), arg); 4938 __ jmp(&done); 4939 4940 // Check if the argument is a safe string wrapper. 4941 __ bind(¬_cached); 4942 __ test(arg, Immediate(kSmiTagMask)); 4943 __ j(zero, slow); 4944 __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1. 4945 __ j(not_equal, slow); 4946 __ test_b(FieldOperand(scratch1, Map::kBitField2Offset), 4947 1 << Map::kStringWrapperSafeForDefaultValueOf); 4948 __ j(zero, slow); 4949 __ mov(arg, FieldOperand(arg, JSValue::kValueOffset)); 4950 __ mov(Operand(esp, stack_offset), arg); 4951 4952 __ bind(&done); 4953 } 4954 4955 4956 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, 4957 Register dest, 4958 Register src, 4959 Register count, 4960 Register scratch, 4961 bool ascii) { 4962 NearLabel loop; 4963 __ bind(&loop); 4964 // This loop just copies one character at a time, as it is only used for very 4965 // short strings. 4966 if (ascii) { 4967 __ mov_b(scratch, Operand(src, 0)); 4968 __ mov_b(Operand(dest, 0), scratch); 4969 __ add(Operand(src), Immediate(1)); 4970 __ add(Operand(dest), Immediate(1)); 4971 } else { 4972 __ mov_w(scratch, Operand(src, 0)); 4973 __ mov_w(Operand(dest, 0), scratch); 4974 __ add(Operand(src), Immediate(2)); 4975 __ add(Operand(dest), Immediate(2)); 4976 } 4977 __ sub(Operand(count), Immediate(1)); 4978 __ j(not_zero, &loop); 4979 } 4980 4981 4982 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, 4983 Register dest, 4984 Register src, 4985 Register count, 4986 Register scratch, 4987 bool ascii) { 4988 // Copy characters using rep movs of doublewords. 4989 // The destination is aligned on a 4 byte boundary because we are 4990 // copying to the beginning of a newly allocated string. 4991 ASSERT(dest.is(edi)); // rep movs destination 4992 ASSERT(src.is(esi)); // rep movs source 4993 ASSERT(count.is(ecx)); // rep movs count 4994 ASSERT(!scratch.is(dest)); 4995 ASSERT(!scratch.is(src)); 4996 ASSERT(!scratch.is(count)); 4997 4998 // Nothing to do for zero characters. 4999 Label done; 5000 __ test(count, Operand(count)); 5001 __ j(zero, &done); 5002 5003 // Make count the number of bytes to copy. 5004 if (!ascii) { 5005 __ shl(count, 1); 5006 } 5007 5008 // Don't enter the rep movs if there are less than 4 bytes to copy. 5009 NearLabel last_bytes; 5010 __ test(count, Immediate(~3)); 5011 __ j(zero, &last_bytes); 5012 5013 // Copy from edi to esi using rep movs instruction. 5014 __ mov(scratch, count); 5015 __ sar(count, 2); // Number of doublewords to copy. 5016 __ cld(); 5017 __ rep_movs(); 5018 5019 // Find number of bytes left. 5020 __ mov(count, scratch); 5021 __ and_(count, 3); 5022 5023 // Check if there are more bytes to copy. 5024 __ bind(&last_bytes); 5025 __ test(count, Operand(count)); 5026 __ j(zero, &done); 5027 5028 // Copy remaining characters. 5029 NearLabel loop; 5030 __ bind(&loop); 5031 __ mov_b(scratch, Operand(src, 0)); 5032 __ mov_b(Operand(dest, 0), scratch); 5033 __ add(Operand(src), Immediate(1)); 5034 __ add(Operand(dest), Immediate(1)); 5035 __ sub(Operand(count), Immediate(1)); 5036 __ j(not_zero, &loop); 5037 5038 __ bind(&done); 5039 } 5040 5041 5042 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, 5043 Register c1, 5044 Register c2, 5045 Register scratch1, 5046 Register scratch2, 5047 Register scratch3, 5048 Label* not_probed, 5049 Label* not_found) { 5050 // Register scratch3 is the general scratch register in this function. 5051 Register scratch = scratch3; 5052 5053 // Make sure that both characters are not digits as such strings has a 5054 // different hash algorithm. Don't try to look for these in the symbol table. 5055 NearLabel not_array_index; 5056 __ mov(scratch, c1); 5057 __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); 5058 __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); 5059 __ j(above, ¬_array_index); 5060 __ mov(scratch, c2); 5061 __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); 5062 __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); 5063 __ j(below_equal, not_probed); 5064 5065 __ bind(¬_array_index); 5066 // Calculate the two character string hash. 5067 Register hash = scratch1; 5068 GenerateHashInit(masm, hash, c1, scratch); 5069 GenerateHashAddCharacter(masm, hash, c2, scratch); 5070 GenerateHashGetHash(masm, hash, scratch); 5071 5072 // Collect the two characters in a register. 5073 Register chars = c1; 5074 __ shl(c2, kBitsPerByte); 5075 __ or_(chars, Operand(c2)); 5076 5077 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. 5078 // hash: hash of two character string. 5079 5080 // Load the symbol table. 5081 Register symbol_table = c2; 5082 ExternalReference roots_address = 5083 ExternalReference::roots_address(masm->isolate()); 5084 __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex)); 5085 __ mov(symbol_table, 5086 Operand::StaticArray(scratch, times_pointer_size, roots_address)); 5087 5088 // Calculate capacity mask from the symbol table capacity. 5089 Register mask = scratch2; 5090 __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); 5091 __ SmiUntag(mask); 5092 __ sub(Operand(mask), Immediate(1)); 5093 5094 // Registers 5095 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. 5096 // hash: hash of two character string 5097 // symbol_table: symbol table 5098 // mask: capacity mask 5099 // scratch: - 5100 5101 // Perform a number of probes in the symbol table. 5102 static const int kProbes = 4; 5103 Label found_in_symbol_table; 5104 Label next_probe[kProbes], next_probe_pop_mask[kProbes]; 5105 for (int i = 0; i < kProbes; i++) { 5106 // Calculate entry in symbol table. 5107 __ mov(scratch, hash); 5108 if (i > 0) { 5109 __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); 5110 } 5111 __ and_(scratch, Operand(mask)); 5112 5113 // Load the entry from the symbol table. 5114 Register candidate = scratch; // Scratch register contains candidate. 5115 STATIC_ASSERT(SymbolTable::kEntrySize == 1); 5116 __ mov(candidate, 5117 FieldOperand(symbol_table, 5118 scratch, 5119 times_pointer_size, 5120 SymbolTable::kElementsStartOffset)); 5121 5122 // If entry is undefined no string with this hash can be found. 5123 Factory* factory = masm->isolate()->factory(); 5124 __ cmp(candidate, factory->undefined_value()); 5125 __ j(equal, not_found); 5126 __ cmp(candidate, factory->null_value()); 5127 __ j(equal, &next_probe[i]); 5128 5129 // If length is not 2 the string is not a candidate. 5130 __ cmp(FieldOperand(candidate, String::kLengthOffset), 5131 Immediate(Smi::FromInt(2))); 5132 __ j(not_equal, &next_probe[i]); 5133 5134 // As we are out of registers save the mask on the stack and use that 5135 // register as a temporary. 5136 __ push(mask); 5137 Register temp = mask; 5138 5139 // Check that the candidate is a non-external ascii string. 5140 __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset)); 5141 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); 5142 __ JumpIfInstanceTypeIsNotSequentialAscii( 5143 temp, temp, &next_probe_pop_mask[i]); 5144 5145 // Check if the two characters match. 5146 __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); 5147 __ and_(temp, 0x0000ffff); 5148 __ cmp(chars, Operand(temp)); 5149 __ j(equal, &found_in_symbol_table); 5150 __ bind(&next_probe_pop_mask[i]); 5151 __ pop(mask); 5152 __ bind(&next_probe[i]); 5153 } 5154 5155 // No matching 2 character string found by probing. 5156 __ jmp(not_found); 5157 5158 // Scratch register contains result when we fall through to here. 5159 Register result = scratch; 5160 __ bind(&found_in_symbol_table); 5161 __ pop(mask); // Pop saved mask from the stack. 5162 if (!result.is(eax)) { 5163 __ mov(eax, result); 5164 } 5165 } 5166 5167 5168 void StringHelper::GenerateHashInit(MacroAssembler* masm, 5169 Register hash, 5170 Register character, 5171 Register scratch) { 5172 // hash = character + (character << 10); 5173 __ mov(hash, character); 5174 __ shl(hash, 10); 5175 __ add(hash, Operand(character)); 5176 // hash ^= hash >> 6; 5177 __ mov(scratch, hash); 5178 __ sar(scratch, 6); 5179 __ xor_(hash, Operand(scratch)); 5180 } 5181 5182 5183 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, 5184 Register hash, 5185 Register character, 5186 Register scratch) { 5187 // hash += character; 5188 __ add(hash, Operand(character)); 5189 // hash += hash << 10; 5190 __ mov(scratch, hash); 5191 __ shl(scratch, 10); 5192 __ add(hash, Operand(scratch)); 5193 // hash ^= hash >> 6; 5194 __ mov(scratch, hash); 5195 __ sar(scratch, 6); 5196 __ xor_(hash, Operand(scratch)); 5197 } 5198 5199 5200 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, 5201 Register hash, 5202 Register scratch) { 5203 // hash += hash << 3; 5204 __ mov(scratch, hash); 5205 __ shl(scratch, 3); 5206 __ add(hash, Operand(scratch)); 5207 // hash ^= hash >> 11; 5208 __ mov(scratch, hash); 5209 __ sar(scratch, 11); 5210 __ xor_(hash, Operand(scratch)); 5211 // hash += hash << 15; 5212 __ mov(scratch, hash); 5213 __ shl(scratch, 15); 5214 __ add(hash, Operand(scratch)); 5215 5216 // if (hash == 0) hash = 27; 5217 NearLabel hash_not_zero; 5218 __ test(hash, Operand(hash)); 5219 __ j(not_zero, &hash_not_zero); 5220 __ mov(hash, Immediate(27)); 5221 __ bind(&hash_not_zero); 5222 } 5223 5224 5225 void SubStringStub::Generate(MacroAssembler* masm) { 5226 Label runtime; 5227 5228 // Stack frame on entry. 5229 // esp[0]: return address 5230 // esp[4]: to 5231 // esp[8]: from 5232 // esp[12]: string 5233 5234 // Make sure first argument is a string. 5235 __ mov(eax, Operand(esp, 3 * kPointerSize)); 5236 STATIC_ASSERT(kSmiTag == 0); 5237 __ test(eax, Immediate(kSmiTagMask)); 5238 __ j(zero, &runtime); 5239 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); 5240 __ j(NegateCondition(is_string), &runtime); 5241 5242 // eax: string 5243 // ebx: instance type 5244 5245 // Calculate length of sub string using the smi values. 5246 Label result_longer_than_two; 5247 __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. 5248 __ test(ecx, Immediate(kSmiTagMask)); 5249 __ j(not_zero, &runtime); 5250 __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. 5251 __ test(edx, Immediate(kSmiTagMask)); 5252 __ j(not_zero, &runtime); 5253 __ sub(ecx, Operand(edx)); 5254 __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); 5255 Label return_eax; 5256 __ j(equal, &return_eax); 5257 // Special handling of sub-strings of length 1 and 2. One character strings 5258 // are handled in the runtime system (looked up in the single character 5259 // cache). Two character strings are looked for in the symbol cache. 5260 __ SmiUntag(ecx); // Result length is no longer smi. 5261 __ cmp(ecx, 2); 5262 __ j(greater, &result_longer_than_two); 5263 __ j(less, &runtime); 5264 5265 // Sub string of length 2 requested. 5266 // eax: string 5267 // ebx: instance type 5268 // ecx: sub string length (value is 2) 5269 // edx: from index (smi) 5270 __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime); 5271 5272 // Get the two characters forming the sub string. 5273 __ SmiUntag(edx); // From index is no longer smi. 5274 __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize)); 5275 __ movzx_b(ecx, 5276 FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1)); 5277 5278 // Try to lookup two character string in symbol table. 5279 Label make_two_character_string; 5280 StringHelper::GenerateTwoCharacterSymbolTableProbe( 5281 masm, ebx, ecx, eax, edx, edi, 5282 &make_two_character_string, &make_two_character_string); 5283 __ ret(3 * kPointerSize); 5284 5285 __ bind(&make_two_character_string); 5286 // Setup registers for allocating the two character string. 5287 __ mov(eax, Operand(esp, 3 * kPointerSize)); 5288 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 5289 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); 5290 __ Set(ecx, Immediate(2)); 5291 5292 __ bind(&result_longer_than_two); 5293 // eax: string 5294 // ebx: instance type 5295 // ecx: result string length 5296 // Check for flat ascii string 5297 Label non_ascii_flat; 5298 __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat); 5299 5300 // Allocate the result. 5301 __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime); 5302 5303 // eax: result string 5304 // ecx: result string length 5305 __ mov(edx, esi); // esi used by following code. 5306 // Locate first character of result. 5307 __ mov(edi, eax); 5308 __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 5309 // Load string argument and locate character of sub string start. 5310 __ mov(esi, Operand(esp, 3 * kPointerSize)); 5311 __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 5312 __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from 5313 __ SmiUntag(ebx); 5314 __ add(esi, Operand(ebx)); 5315 5316 // eax: result string 5317 // ecx: result length 5318 // edx: original value of esi 5319 // edi: first character of result 5320 // esi: character of sub string start 5321 StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true); 5322 __ mov(esi, edx); // Restore esi. 5323 Counters* counters = masm->isolate()->counters(); 5324 __ IncrementCounter(counters->sub_string_native(), 1); 5325 __ ret(3 * kPointerSize); 5326 5327 __ bind(&non_ascii_flat); 5328 // eax: string 5329 // ebx: instance type & kStringRepresentationMask | kStringEncodingMask 5330 // ecx: result string length 5331 // Check for flat two byte string 5332 __ cmp(ebx, kSeqStringTag | kTwoByteStringTag); 5333 __ j(not_equal, &runtime); 5334 5335 // Allocate the result. 5336 __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime); 5337 5338 // eax: result string 5339 // ecx: result string length 5340 __ mov(edx, esi); // esi used by following code. 5341 // Locate first character of result. 5342 __ mov(edi, eax); 5343 __ add(Operand(edi), 5344 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5345 // Load string argument and locate character of sub string start. 5346 __ mov(esi, Operand(esp, 3 * kPointerSize)); 5347 __ add(Operand(esi), 5348 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5349 __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from 5350 // As from is a smi it is 2 times the value which matches the size of a two 5351 // byte character. 5352 STATIC_ASSERT(kSmiTag == 0); 5353 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 5354 __ add(esi, Operand(ebx)); 5355 5356 // eax: result string 5357 // ecx: result length 5358 // edx: original value of esi 5359 // edi: first character of result 5360 // esi: character of sub string start 5361 StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false); 5362 __ mov(esi, edx); // Restore esi. 5363 5364 __ bind(&return_eax); 5365 __ IncrementCounter(counters->sub_string_native(), 1); 5366 __ ret(3 * kPointerSize); 5367 5368 // Just jump to runtime to create the sub string. 5369 __ bind(&runtime); 5370 __ TailCallRuntime(Runtime::kSubString, 3, 1); 5371 } 5372 5373 5374 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, 5375 Register left, 5376 Register right, 5377 Register scratch1, 5378 Register scratch2, 5379 Register scratch3) { 5380 Label result_not_equal; 5381 Label result_greater; 5382 Label compare_lengths; 5383 5384 Counters* counters = masm->isolate()->counters(); 5385 __ IncrementCounter(counters->string_compare_native(), 1); 5386 5387 // Find minimum length. 5388 NearLabel left_shorter; 5389 __ mov(scratch1, FieldOperand(left, String::kLengthOffset)); 5390 __ mov(scratch3, scratch1); 5391 __ sub(scratch3, FieldOperand(right, String::kLengthOffset)); 5392 5393 Register length_delta = scratch3; 5394 5395 __ j(less_equal, &left_shorter); 5396 // Right string is shorter. Change scratch1 to be length of right string. 5397 __ sub(scratch1, Operand(length_delta)); 5398 __ bind(&left_shorter); 5399 5400 Register min_length = scratch1; 5401 5402 // If either length is zero, just compare lengths. 5403 __ test(min_length, Operand(min_length)); 5404 __ j(zero, &compare_lengths); 5405 5406 // Change index to run from -min_length to -1 by adding min_length 5407 // to string start. This means that loop ends when index reaches zero, 5408 // which doesn't need an additional compare. 5409 __ SmiUntag(min_length); 5410 __ lea(left, 5411 FieldOperand(left, 5412 min_length, times_1, 5413 SeqAsciiString::kHeaderSize)); 5414 __ lea(right, 5415 FieldOperand(right, 5416 min_length, times_1, 5417 SeqAsciiString::kHeaderSize)); 5418 __ neg(min_length); 5419 5420 Register index = min_length; // index = -min_length; 5421 5422 { 5423 // Compare loop. 5424 NearLabel loop; 5425 __ bind(&loop); 5426 // Compare characters. 5427 __ mov_b(scratch2, Operand(left, index, times_1, 0)); 5428 __ cmpb(scratch2, Operand(right, index, times_1, 0)); 5429 __ j(not_equal, &result_not_equal); 5430 __ add(Operand(index), Immediate(1)); 5431 __ j(not_zero, &loop); 5432 } 5433 5434 // Compare lengths - strings up to min-length are equal. 5435 __ bind(&compare_lengths); 5436 __ test(length_delta, Operand(length_delta)); 5437 __ j(not_zero, &result_not_equal); 5438 5439 // Result is EQUAL. 5440 STATIC_ASSERT(EQUAL == 0); 5441 STATIC_ASSERT(kSmiTag == 0); 5442 __ Set(eax, Immediate(Smi::FromInt(EQUAL))); 5443 __ ret(0); 5444 5445 __ bind(&result_not_equal); 5446 __ j(greater, &result_greater); 5447 5448 // Result is LESS. 5449 __ Set(eax, Immediate(Smi::FromInt(LESS))); 5450 __ ret(0); 5451 5452 // Result is GREATER. 5453 __ bind(&result_greater); 5454 __ Set(eax, Immediate(Smi::FromInt(GREATER))); 5455 __ ret(0); 5456 } 5457 5458 5459 void StringCompareStub::Generate(MacroAssembler* masm) { 5460 Label runtime; 5461 5462 // Stack frame on entry. 5463 // esp[0]: return address 5464 // esp[4]: right string 5465 // esp[8]: left string 5466 5467 __ mov(edx, Operand(esp, 2 * kPointerSize)); // left 5468 __ mov(eax, Operand(esp, 1 * kPointerSize)); // right 5469 5470 NearLabel not_same; 5471 __ cmp(edx, Operand(eax)); 5472 __ j(not_equal, ¬_same); 5473 STATIC_ASSERT(EQUAL == 0); 5474 STATIC_ASSERT(kSmiTag == 0); 5475 __ Set(eax, Immediate(Smi::FromInt(EQUAL))); 5476 __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1); 5477 __ ret(2 * kPointerSize); 5478 5479 __ bind(¬_same); 5480 5481 // Check that both objects are sequential ascii strings. 5482 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime); 5483 5484 // Compare flat ascii strings. 5485 // Drop arguments from the stack. 5486 __ pop(ecx); 5487 __ add(Operand(esp), Immediate(2 * kPointerSize)); 5488 __ push(ecx); 5489 GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); 5490 5491 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) 5492 // tagged as a small integer. 5493 __ bind(&runtime); 5494 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); 5495 } 5496 5497 5498 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { 5499 ASSERT(state_ == CompareIC::SMIS); 5500 NearLabel miss; 5501 __ mov(ecx, Operand(edx)); 5502 __ or_(ecx, Operand(eax)); 5503 __ test(ecx, Immediate(kSmiTagMask)); 5504 __ j(not_zero, &miss, not_taken); 5505 5506 if (GetCondition() == equal) { 5507 // For equality we do not care about the sign of the result. 5508 __ sub(eax, Operand(edx)); 5509 } else { 5510 NearLabel done; 5511 __ sub(edx, Operand(eax)); 5512 __ j(no_overflow, &done); 5513 // Correct sign of result in case of overflow. 5514 __ not_(edx); 5515 __ bind(&done); 5516 __ mov(eax, edx); 5517 } 5518 __ ret(0); 5519 5520 __ bind(&miss); 5521 GenerateMiss(masm); 5522 } 5523 5524 5525 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { 5526 ASSERT(state_ == CompareIC::HEAP_NUMBERS); 5527 5528 NearLabel generic_stub; 5529 NearLabel unordered; 5530 NearLabel miss; 5531 __ mov(ecx, Operand(edx)); 5532 __ and_(ecx, Operand(eax)); 5533 __ test(ecx, Immediate(kSmiTagMask)); 5534 __ j(zero, &generic_stub, not_taken); 5535 5536 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); 5537 __ j(not_equal, &miss, not_taken); 5538 __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); 5539 __ j(not_equal, &miss, not_taken); 5540 5541 // Inlining the double comparison and falling back to the general compare 5542 // stub if NaN is involved or SS2 or CMOV is unsupported. 5543 if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { 5544 CpuFeatures::Scope scope1(SSE2); 5545 CpuFeatures::Scope scope2(CMOV); 5546 5547 // Load left and right operand 5548 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 5549 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 5550 5551 // Compare operands 5552 __ ucomisd(xmm0, xmm1); 5553 5554 // Don't base result on EFLAGS when a NaN is involved. 5555 __ j(parity_even, &unordered, not_taken); 5556 5557 // Return a result of -1, 0, or 1, based on EFLAGS. 5558 // Performing mov, because xor would destroy the flag register. 5559 __ mov(eax, 0); // equal 5560 __ mov(ecx, Immediate(Smi::FromInt(1))); 5561 __ cmov(above, eax, Operand(ecx)); 5562 __ mov(ecx, Immediate(Smi::FromInt(-1))); 5563 __ cmov(below, eax, Operand(ecx)); 5564 __ ret(0); 5565 5566 __ bind(&unordered); 5567 } 5568 5569 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); 5570 __ bind(&generic_stub); 5571 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); 5572 5573 __ bind(&miss); 5574 GenerateMiss(masm); 5575 } 5576 5577 5578 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { 5579 ASSERT(state_ == CompareIC::OBJECTS); 5580 NearLabel miss; 5581 __ mov(ecx, Operand(edx)); 5582 __ and_(ecx, Operand(eax)); 5583 __ test(ecx, Immediate(kSmiTagMask)); 5584 __ j(zero, &miss, not_taken); 5585 5586 __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx); 5587 __ j(not_equal, &miss, not_taken); 5588 __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx); 5589 __ j(not_equal, &miss, not_taken); 5590 5591 ASSERT(GetCondition() == equal); 5592 __ sub(eax, Operand(edx)); 5593 __ ret(0); 5594 5595 __ bind(&miss); 5596 GenerateMiss(masm); 5597 } 5598 5599 5600 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { 5601 // Save the registers. 5602 __ pop(ecx); 5603 __ push(edx); 5604 __ push(eax); 5605 __ push(ecx); 5606 5607 // Call the runtime system in a fresh internal frame. 5608 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), 5609 masm->isolate()); 5610 __ EnterInternalFrame(); 5611 __ push(edx); 5612 __ push(eax); 5613 __ push(Immediate(Smi::FromInt(op_))); 5614 __ CallExternalReference(miss, 3); 5615 __ LeaveInternalFrame(); 5616 5617 // Compute the entry point of the rewritten stub. 5618 __ lea(edi, FieldOperand(eax, Code::kHeaderSize)); 5619 5620 // Restore registers. 5621 __ pop(ecx); 5622 __ pop(eax); 5623 __ pop(edx); 5624 __ push(ecx); 5625 5626 // Do a tail call to the rewritten stub. 5627 __ jmp(Operand(edi)); 5628 } 5629 5630 5631 #undef __ 5632 5633 } } // namespace v8::internal 5634 5635 #endif // V8_TARGET_ARCH_IA32 5636