// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_ARM

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


void ToNumberStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r3, r2, r1 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r3, r2, r1, r0 };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r2 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1, r0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void LoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r2, r1, r0 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0, r1 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // r0 -- number of arguments
  // r1 -- function
  // r2 -- type info cell with elements kind
  static Register registers[] = { r1, r2 };
  descriptor->register_param_count_ = 2;
  if (constant_stack_parameter_count != 0) {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->stack_parameter_count_ = &r0;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // r0 -- number of arguments
  // r1 -- constructor function
  static Register registers[] = { r1 };
  descriptor->register_param_count_ = 1;

  if (constant_stack_parameter_count != 0) {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->stack_parameter_count_ = &r0;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}


void StoreGlobalStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1, r2, r0 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}


void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0, r3, r1, r2 };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  Isolate* isolate = masm->isolate();
  isolate->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT(descriptor->register_param_count_ == 0 ||
           r0.is(descriptor->register_params_[param_count - 1]));
    // Push arguments.
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor->register_params_[i]);
    }
    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ Ret();
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Counters* counters = masm->isolate()->counters();

  Label gc;

  // Pop the function info from the stack.
  __ pop(r3);

  // Attempt to allocate new JSFunction in new space.
  __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);

  __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);

  int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);

  // Compute the function map in the current native context and set that
  // as the map of the allocated object.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
  __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
  __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  // But first check if there is an optimized version for our context.
  Label check_optimized;
  Label install_unoptimized;
  if (FLAG_cache_optimized_code) {
    __ ldr(r1,
           FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
    __ tst(r1, r1);
    __ b(ne, &check_optimized);
  }
  __ bind(&install_unoptimized);
  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  __ bind(&check_optimized);

  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);

  // r2 holds native context, r1 points to fixed array of 3-element entries
  // (native context, optimized code, literals).
  // The optimized code map must never be empty, so check the first elements.
  Label install_optimized;
  // Speculatively move code object into r4.
  __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
  __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
  __ cmp(r2, r5);
  __ b(eq, &install_optimized);

  // Iterate through the rest of map backwards. r4 holds an index as a Smi.
  Label loop;
  __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
  __ bind(&loop);
  // Do not double check first entry.
  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
  __ b(eq, &install_unoptimized);
  __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
  __ ldr(r5, MemOperand(r5));
  __ cmp(r2, r5);
  __ b(ne, &loop);
  // Hit: fetch the optimized code.
  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
  __ add(r5, r5, Operand(kPointerSize));
  __ ldr(r4, MemOperand(r5));

  __ bind(&install_optimized);
  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
                      1, r6, r7);

  // TODO(fschneider): Idea: store proper code pointers in the map and either
  // unmangle them on marking or do nothing as the whole map is discarded on
  // major GC anyway.
  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));

  // Now link a function into a list of optimized functions.
  __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));

  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
  // No need for write barrier as the JSFunction (r0) is in new space.

  __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
  // Copy the JSFunction (r0) into r4 before issuing the write barrier, as
  // it clobbers all the registers passed.
  __ mov(r4, r0);
  __ RecordWriteContextSlot(
      r2,
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
      r4,
      r1,
      kLRHasNotBeenSaved,
      kDontSaveFPRegs);

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
  __ Push(cp, r3, r4);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // If this block context is nested in the native context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the native context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(r3, &after_sentinel);
  if (FLAG_debug_code) {
    __ cmp(r3, Operand::Zero());
    __ Assert(eq, kExpected0AsASmiSentinel);
  }
  __ ldr(r3, GlobalObjectOperand());
  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
  __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
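// For example: the Smi 1 is encoded as exponent word 0x3FF00000 with a zero
// mantissa word, and -1 differs only in the sign bit (0xBFF00000), while 0
// produces two zero words.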
class ConvertToDoubleStub : public PlatformCodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  Register exponent = result1_;
  Register mantissa = result2_;

  Label not_special;
  __ SmiUntag(source_);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // the absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand::Zero());
  __ Ret();

  __ bind(&not_special);
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here. Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
  // that fit in the ARM's constant field.
  int fudge = 0x400;
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
    return true;
  }
  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
  stub1.GetCode(isolate)->set_is_pregenerated(true);
  stub2.GetCode(isolate)->set_is_pregenerated(true);
}


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent. This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand::Zero());
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
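// For example, when both operands are the very same HeapNumber holding NaN,
// the comparison must still report "not equal", which is why the heap number
// path below checks for NaN explicitly.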
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so neither of them can
  // be a Smi. If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
    __ b(ge, slow);
  } else {
    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    __ b(eq, &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
      __ b(ge, slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmp(r4, Operand(ODDBALL_TYPE));
        __ b(ne, &return_equal);
        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
        __ cmp(r0, r2);
        __ b(ne, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ mov(r0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ mov(r0, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // NaNs have all-one exponents so they sign extend to -1.
    __ cmp(r3, Operand(-1));
    __ b(ne, &return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    __ orr(r0, r3, Operand(r2), SetCC);
    // For equal we already have the right value in r0: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq);
      if (cond == le) {
        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ mov(r0, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.
  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed. Return non-equal.
    // If rhs is r0 then there is already a non zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r0, to d6.
  __ sub(r7, rhs, Operand(kHeapObjectTag));
  __ vldr(d6, r7, HeapNumber::kValueOffset);

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed. Return non-equal.
    // If lhs is r0 then there is already a non zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r1, to d7.
  __ sub(r7, lhs, Operand(kHeapObjectTag));
  __ vldr(d7, r7, HeapNumber::kValueOffset);
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
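  // The instance types are in r2 and r3. Because kInternalizedTag and
  // kStringTag are both zero, ORing the two types and testing the combined
  // string/internalized bits yields zero only when both operands are
  // internalized strings, in which case they cannot be equal (they are
  // different pointers).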
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orr(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(eq, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ sub(r7, rhs, Operand(kHeapObjectTag));
  __ vldr(d6, r7, HeapNumber::kValueOffset);
  __ sub(r7, lhs, Operand(kHeapObjectTag));
  __ vldr(d7, r7, HeapNumber::kValueOffset);
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);

  // Both are internalized. We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));
  __ Ret();

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
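  // (Shifting right by kSmiTagSize untags the smi length and the extra bit
  // of shift halves it, assuming the usual 31-bit smi encoding with
  // kSmiTagSize == 1.)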
  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
  __ sub(mask, mask, Operand(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Isolate* isolate = masm->isolate();
  Label is_smi;
  Label load_result_from_cache;
  __ JumpIfSmi(object, &is_smi);
  __ CheckMap(object,
              scratch1,
              Heap::kHeapNumberMapRootIndex,
              not_found,
              DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  __ add(scratch1,
         object,
         Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
  __ eor(scratch1, scratch1, Operand(scratch2));
  __ and_(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  __ add(scratch1,
         number_string_cache,
         Operand(scratch1, LSL, kPointerSizeLog2 + 1));

  Register probe = mask;
  __ ldr(probe,
         FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ JumpIfSmi(probe, not_found);
  __ sub(scratch2, object, Operand(kHeapObjectTag));
  __ vldr(d0, scratch2, HeapNumber::kValueOffset);
  __ sub(probe, probe, Operand(kHeapObjectTag));
  __ vldr(d1, probe, HeapNumber::kValueOffset);
  __ VFPCompareAndSetFlags(d0, d1);
  __ b(ne, not_found);  // The cache did not contain this value.
  __ b(&load_result_from_cache);

  __ bind(&is_smi);
  Register scratch = scratch1;
  __ and_(scratch, mask, Operand(object, ASR, 1));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  __ add(scratch,
         number_string_cache,
         Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ cmp(object, probe);
  __ b(ne, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ ldr(result,
         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  __ IncrementCounter(isolate->counters()->number_to_string_native(),
                      1,
                      scratch1,
                      scratch2);
}


void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ ldr(r1, MemOperand(sp, 0));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
  __ add(sp, sp, Operand(1 * kPointerSize));
  __ Ret();

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}


static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                         Register input,
                                         Register scratch,
                                         CompareIC::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry r1 and r0 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r1;
  Register rhs = r0;
  Condition cc = GetCondition();

  Label miss;
  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis, smi_done;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &not_two_smis);
  __ mov(r1, Operand(r1, ASR, 1));
  __ sub(r0, r1, Operand(r0, ASR, 1));
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ and_(r2, lhs, Operand(rhs));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6. Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  Isolate* isolate = masm->isolate();
  __ bind(&lhs_not_nan);
  Label no_nan;
  // ARMv7 VFP3 instructions to implement double precision comparison.
  __ VFPCompareAndSetFlags(d7, d6);
  Label nan;
  __ b(vs, &nan);
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the v flag is set. Load r0 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
  if (cc == lt || cc == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r2 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable
    // objects. Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
  if (cc == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs,
                                                     rhs,
                                                     r2,
                                                     r3,
                                                     r4);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs,
                                                       rhs,
                                                       r2,
                                                       r3,
                                                       r4,
                                                       r5);
  }
  // Never falls through to here.

  __ bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cc == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ stm(db_w, sp, kCallerSaved | lr.bit());

  const Register scratch = r1;

  if (save_doubles_ == kSaveFPRegs) {
    __ SaveFPRegs(sp, scratch);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    __ RestoreFPRegs(sp, scratch);
  }
  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
}


// Generates code to call a C function to do a double operation.
// This code never falls through, but returns with a heap number containing
// the result in r0.
// Register heap_number_result must be a heap number in which the
// result of the operation will be stored.
// Requires the following layout on entry:
// d0: Left value.
// d1: Right value.
// If soft float ABI, use also r0, r1, r2, r3.
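// (Concretely: the code below mirrors d0/d1 into r0..r3 when the hard-float
// ABI is not in use, and reads the result back from r0/r1 instead of d0.)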
static void CallCCodeForDoubleOperation(MacroAssembler* masm,
                                        Token::Value op,
                                        Register heap_number_result,
                                        Register scratch) {
  // Assert that heap_number_result is callee-saved.
  // We currently always use r5 to pass it.
  ASSERT(heap_number_result.is(r5));

  // Push the current return address before the C call. Return will be
  // through pop(pc) below.
  __ push(lr);
  __ PrepareCallCFunction(0, 2, scratch);
  if (!masm->use_eabi_hardfloat()) {
    __ vmov(r0, r1, d0);
    __ vmov(r2, r3, d1);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number. Double returned in
  // registers r0 and r1 or in d0.
  if (masm->use_eabi_hardfloat()) {
    __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    __ Strd(r0, r1,
            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  }
  // Place heap_number_result in r0 and return to the pushed return address.
  __ mov(r0, Operand(heap_number_result));
  __ pop(pc);
}


void BinaryOpStub::Initialize() {
  platform_specific_bit_ = true;  // VFP2 is a base requirement for V8
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  Label get_result;

  __ Push(r1, r0);

  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
  __ push(r2);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      3,
      1);
}


void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  UNIMPLEMENTED();
}


void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
                                          Token::Value op) {
  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;
  Register scratch2 = r9;

  ASSERT(right.is(r0));
  STATIC_ASSERT(kSmiTag == 0);

  Label not_smi_result;
  switch (op) {
    case Token::ADD:
      __ add(right, left, Operand(right), SetCC);  // Add optimistically.
      __ Ret(vc);
      __ sub(right, right, Operand(left));  // Revert optimistic add.
      break;
    case Token::SUB:
      __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
      __ Ret(vc);
      __ sub(right, left, Operand(right));  // Revert optimistic subtract.
      break;
    case Token::MUL:
      // Remove tag from one of the operands. This way the multiplication
      // result will be a smi if it fits the smi range.
      __ SmiUntag(ip, right);
      // Do multiplication:
      // scratch1 = lower 32 bits of ip * left.
      // scratch2 = higher 32 bits of ip * left.
      __ smull(scratch1, scratch2, left, ip);
      // Check for overflowing the smi range - no overflow if higher 33 bits
      // of the result are identical.
      __ mov(ip, Operand(scratch1, ASR, 31));
      __ cmp(ip, Operand(scratch2));
      __ b(ne, &not_smi_result);
      // Go slow on zero result to handle -0.
      __ cmp(scratch1, Operand::Zero());
      __ mov(right, Operand(scratch1), LeaveCC, ne);
      __ Ret(ne);
      // We need -0 if we were multiplying a negative number with 0 to get 0.
      // We know one of them was zero.
      __ add(scratch2, right, Operand(left), SetCC);
      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
      __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
      // We fall through here if we multiplied a negative number with 0,
      // because that would mean we should produce -0.
      break;
    case Token::DIV: {
      Label div_with_sdiv;

      // Check for 0 divisor.
      __ cmp(right, Operand::Zero());
      __ b(eq, &not_smi_result);

      // Check for power of two on the right hand side.
      __ sub(scratch1, right, Operand(1));
      __ tst(scratch1, right);
      if (CpuFeatures::IsSupported(SUDIV)) {
        __ b(ne, &div_with_sdiv);
        // Check for no remainder.
        __ tst(left, scratch1);
        __ b(ne, &not_smi_result);
        // Check for positive left hand side.
        __ cmp(left, Operand::Zero());
        __ b(mi, &div_with_sdiv);
      } else {
        __ b(ne, &not_smi_result);
        // Check for positive and no remainder.
        __ orr(scratch2, scratch1, Operand(0x80000000u));
        __ tst(left, scratch2);
        __ b(ne, &not_smi_result);
      }

      // Perform division by shifting.
      __ clz(scratch1, scratch1);
      __ rsb(scratch1, scratch1, Operand(31));
      __ mov(right, Operand(left, LSR, scratch1));
      __ Ret();

      if (CpuFeatures::IsSupported(SUDIV)) {
        CpuFeatureScope scope(masm, SUDIV);
        Label result_not_zero;

        __ bind(&div_with_sdiv);
        // Do division.
        __ sdiv(scratch1, left, right);
        // Check that the remainder is zero.
        __ mls(scratch2, scratch1, right, left);
        __ cmp(scratch2, Operand::Zero());
        __ b(ne, &not_smi_result);
        // Check for negative zero result.
        __ cmp(scratch1, Operand::Zero());
        __ b(ne, &result_not_zero);
        __ cmp(right, Operand::Zero());
        __ b(lt, &not_smi_result);
        __ bind(&result_not_zero);
        // Check for the corner case of dividing the most negative smi by -1.
        __ cmp(scratch1, Operand(0x40000000));
        __ b(eq, &not_smi_result);
        // Tag and return the result.
        __ SmiTag(right, scratch1);
        __ Ret();
      }
      break;
    }
    case Token::MOD: {
      Label modulo_with_sdiv;

      if (CpuFeatures::IsSupported(SUDIV)) {
        // Check for x % 0.
        __ cmp(right, Operand::Zero());
        __ b(eq, &not_smi_result);

        // Check for two positive smis.
        __ orr(scratch1, left, Operand(right));
        __ tst(scratch1, Operand(0x80000000u));
        __ b(ne, &modulo_with_sdiv);

        // Check for power of two on the right hand side.
        __ sub(scratch1, right, Operand(1));
        __ tst(scratch1, right);
        __ b(ne, &modulo_with_sdiv);
      } else {
        // Check for two positive smis.
        __ orr(scratch1, left, Operand(right));
        __ tst(scratch1, Operand(0x80000000u));
        __ b(ne, &not_smi_result);

        // Check for power of two on the right hand side.
        __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
      }

      // Perform modulus by masking (scratch1 contains right - 1).
      __ and_(right, left, Operand(scratch1));
      __ Ret();

      if (CpuFeatures::IsSupported(SUDIV)) {
        CpuFeatureScope scope(masm, SUDIV);
        __ bind(&modulo_with_sdiv);
        __ mov(scratch2, right);
        // Perform modulus with sdiv and mls.
        __ sdiv(scratch1, left, right);
        __ mls(right, scratch1, right, left);
        // Return if the result is not 0.
        __ cmp(right, Operand::Zero());
        __ Ret(ne);
        // The result is 0, check for -0 case.
        __ cmp(left, Operand::Zero());
        __ Ret(pl);
        // This is a -0 case, restore the value of right.
        __ mov(right, scratch2);
        // We fall through here to not_smi_result to produce -0.
      }
      break;
    }
    case Token::BIT_OR:
      __ orr(right, left, Operand(right));
      __ Ret();
      break;
    case Token::BIT_AND:
      __ and_(right, left, Operand(right));
      __ Ret();
      break;
    case Token::BIT_XOR:
      __ eor(right, left, Operand(right));
      __ Ret();
      break;
    case Token::SAR:
      // Remove tags from right operand.
      __ GetLeastBitsFromSmi(scratch1, right, 5);
      __ mov(right, Operand(left, ASR, scratch1));
      // Smi tag result.
      __ bic(right, right, Operand(kSmiTagMask));
      __ Ret();
      break;
    case Token::SHR:
      // Remove tags from operands. We can't do this on a 31 bit number
      // because then the 0s get shifted into bit 30 instead of bit 31.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
      // Unsigned shift is not allowed to produce a negative number, so
      // check the sign bit and the sign bit after Smi tagging.
      __ tst(scratch1, Operand(0xc0000000));
      __ b(ne, &not_smi_result);
      // Smi tag result.
      __ SmiTag(right, scratch1);
      __ Ret();
      break;
    case Token::SHL:
      // Remove tags from operands.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
      // Check that the signed result fits in a Smi.
      __ TrySmiTag(right, scratch1, &not_smi_result);
      __ Ret();
      break;
    default:
      UNREACHABLE();
  }
  __ bind(&not_smi_result);
}


void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                               Register result,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required,
                                               OverwriteMode mode);


void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
                                      BinaryOpIC::TypeInfo left_type,
                                      BinaryOpIC::TypeInfo right_type,
                                      bool smi_operands,
                                      Label* not_numbers,
                                      Label* gc_required,
                                      Label* miss,
                                      Token::Value op,
                                      OverwriteMode mode) {
  Register left = r1;
  Register right = r0;
  Register scratch1 = r6;
  Register scratch2 = r7;
  Register scratch3 = r4;

  ASSERT(smi_operands || (not_numbers != NULL));
  if (smi_operands) {
    __ AssertSmi(left);
    __ AssertSmi(right);
  }
  if (left_type == BinaryOpIC::SMI) {
    __ JumpIfNotSmi(left, miss);
  }
  if (right_type == BinaryOpIC::SMI) {
    __ JumpIfNotSmi(right, miss);
  }

  Register heap_number_map = r9;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Allocate new heap number for result.
      Register result = r5;
      BinaryOpStub_GenerateHeapResultAllocation(
          masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);

      // Load left and right operands into d0 and d1.
      if (smi_operands) {
        __ SmiToDouble(d1, right);
        __ SmiToDouble(d0, left);
      } else {
        // Load right operand into d1.
        if (right_type == BinaryOpIC::INT32) {
          __ LoadNumberAsInt32Double(
              right, d1, heap_number_map, scratch1, d8, miss);
        } else {
          Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
          __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
        }
        // Load left operand into d0.
        if (left_type == BinaryOpIC::INT32) {
          __ LoadNumberAsInt32Double(
              left, d0, heap_number_map, scratch1, d8, miss);
        } else {
          Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
          __ LoadNumber(
              left, d0, heap_number_map, scratch1, fail);
        }
      }

      // Calculate the result.
      if (op != Token::MOD) {
        // Using VFP registers:
        //   d0: Left value
        //   d1: Right value
        switch (op) {
          case Token::ADD:
            __ vadd(d5, d0, d1);
            break;
          case Token::SUB:
            __ vsub(d5, d0, d1);
            break;
          case Token::MUL:
            __ vmul(d5, d0, d1);
            break;
          case Token::DIV:
            __ vdiv(d5, d0, d1);
            break;
          default:
            UNREACHABLE();
        }

        __ sub(r0, result, Operand(kHeapObjectTag));
        __ vstr(d5, r0, HeapNumber::kValueOffset);
        __ add(r0, r0, Operand(kHeapObjectTag));
        __ Ret();
      } else {
        // Call the C function to handle the double operation.
        CallCCodeForDoubleOperation(masm, op, result, scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }
      }
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      if (smi_operands) {
        __ SmiUntag(r3, left);
        __ SmiUntag(r2, right);
      } else {
        // Convert operands to 32-bit integers. Right in r2 and left in r3.
        __ ConvertNumberToInt32(
            left, r3, heap_number_map,
            scratch1, scratch2, scratch3, d0, d1, not_numbers);
        __ ConvertNumberToInt32(
            right, r2, heap_number_map,
            scratch1, scratch2, scratch3, d0, d1, not_numbers);
      }

      Label result_not_a_smi;
      switch (op) {
        case Token::BIT_OR:
          __ orr(r2, r3, Operand(r2));
          break;
        case Token::BIT_XOR:
          __ eor(r2, r3, Operand(r2));
          break;
        case Token::BIT_AND:
          __ and_(r2, r3, Operand(r2));
          break;
        case Token::SAR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(r2, r2, 5);
          __ mov(r2, Operand(r3, ASR, r2));
          break;
        case Token::SHR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(r2, r2, 5);
          __ mov(r2, Operand(r3, LSR, r2), SetCC);
          // SHR is special because it is required to produce a positive
          // answer. The code below for writing into heap numbers isn't
          // capable of writing the register as an unsigned int so we go to
          // slow case if we hit this case.
          __ b(mi, &result_not_a_smi);
          break;
        case Token::SHL:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(r2, r2, 5);
          __ mov(r2, Operand(r3, LSL, r2));
          break;
        default:
          UNREACHABLE();
      }

      // Check that the *signed* result fits in a smi.
      __ TrySmiTag(r0, r2, &result_not_a_smi);
      __ Ret();

      // Allocate new heap number for result.
      __ bind(&result_not_a_smi);
      Register result = r5;
      if (smi_operands) {
        __ AllocateHeapNumber(
            result, scratch1, scratch2, heap_number_map, gc_required);
      } else {
        BinaryOpStub_GenerateHeapResultAllocation(
            masm, result, heap_number_map, scratch1, scratch2, gc_required,
            mode);
      }

      // r2: Answer as signed int32.
      // r5: Heap number to write answer into.

      // Nothing can go wrong now, so move the heap number to r0, which is the
      // result.
      __ mov(r0, Operand(r5));

      // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
      // mentioned above SHR needs to always produce a positive result.
      __ vmov(s0, r2);
      if (op == Token::SHR) {
        __ vcvt_f64_u32(d0, s0);
      } else {
        __ vcvt_f64_s32(d0, s0);
      }
      __ sub(r3, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r3, HeapNumber::kValueOffset);
      __ Ret();
      break;
    }
    default:
      UNREACHABLE();
  }
}


// Generate the smi code. If the operation on smis is successful, this return
// is generated. If the result is not a smi and heap number allocation is not
// requested, the code falls through. If number allocation is requested but a
// heap number cannot be allocated, the code jumps to the label gc_required.
void BinaryOpStub_GenerateSmiCode(
    MacroAssembler* masm,
    Label* use_runtime,
    Label* gc_required,
    Token::Value op,
    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
    OverwriteMode mode) {
  Label not_smis;

  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;

  // Perform combined smi check on both operands.
  __ orr(scratch1, left, Operand(right));
  __ JumpIfNotSmi(scratch1, &not_smis);

  // If the smi-smi operation results in a smi, a return is generated.
  BinaryOpStub_GenerateSmiSmiOperation(masm, op);

  // If heap number results are possible generate the result in an allocated
  // heap number.
  if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
    BinaryOpStub_GenerateFPOperation(
        masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
        use_runtime, gc_required, &not_smis, op, mode);
  }
  __ bind(&not_smis);
}


void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label right_arg_changed, call_runtime;

  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
    // It is guaranteed that the value will fit into a Smi, because if it
    // didn't, we wouldn't be here, see BinaryOp_Patch.
    __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
    __ b(ne, &right_arg_changed);
  }

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
        mode_);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
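  // In that case (or when the fixed right argument changed) the recorded type
  // feedback is stale, so a type transition is requested to re-patch this IC.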
1837 __ bind(&right_arg_changed); 1838 GenerateTypeTransition(masm); 1839 1840 __ bind(&call_runtime); 1841 { 1842 FrameScope scope(masm, StackFrame::INTERNAL); 1843 GenerateRegisterArgsPush(masm); 1844 GenerateCallRuntime(masm); 1845 } 1846 __ Ret(); 1847 } 1848 1849 1850 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { 1851 Label call_runtime; 1852 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); 1853 ASSERT(op_ == Token::ADD); 1854 // If both arguments are strings, call the string add stub. 1855 // Otherwise, do a transition. 1856 1857 // Registers containing left and right operands respectively. 1858 Register left = r1; 1859 Register right = r0; 1860 1861 // Test if left operand is a string. 1862 __ JumpIfSmi(left, &call_runtime); 1863 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); 1864 __ b(ge, &call_runtime); 1865 1866 // Test if right operand is a string. 1867 __ JumpIfSmi(right, &call_runtime); 1868 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); 1869 __ b(ge, &call_runtime); 1870 1871 StringAddStub string_add_stub( 1872 (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); 1873 GenerateRegisterArgsPush(masm); 1874 __ TailCallStub(&string_add_stub); 1875 1876 __ bind(&call_runtime); 1877 GenerateTypeTransition(masm); 1878 } 1879 1880 1881 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { 1882 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); 1883 1884 Register left = r1; 1885 Register right = r0; 1886 Register scratch1 = r7; 1887 Register scratch2 = r9; 1888 LowDwVfpRegister double_scratch = d0; 1889 1890 Register heap_number_result = no_reg; 1891 Register heap_number_map = r6; 1892 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 1893 1894 Label call_runtime; 1895 // Labels for type transition, used for wrong input or output types. 1896 // Both label are currently actually bound to the same position. We use two 1897 // different label to differentiate the cause leading to type transition. 1898 Label transition; 1899 1900 // Smi-smi fast case. 1901 Label skip; 1902 __ orr(scratch1, left, right); 1903 __ JumpIfNotSmi(scratch1, &skip); 1904 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); 1905 // Fall through if the result is not a smi. 1906 __ bind(&skip); 1907 1908 switch (op_) { 1909 case Token::ADD: 1910 case Token::SUB: 1911 case Token::MUL: 1912 case Token::DIV: 1913 case Token::MOD: { 1914 // It could be that only SMIs have been seen at either the left 1915 // or the right operand. For precise type feedback, patch the IC 1916 // again if this changes. 1917 if (left_type_ == BinaryOpIC::SMI) { 1918 __ JumpIfNotSmi(left, &transition); 1919 } 1920 if (right_type_ == BinaryOpIC::SMI) { 1921 __ JumpIfNotSmi(right, &transition); 1922 } 1923 // Load both operands and check that they are 32-bit integer. 1924 // Jump to type transition if they are not. The registers r0 and r1 (right 1925 // and left) are preserved for the runtime call. 
      __ LoadNumberAsInt32Double(
          right, d1, heap_number_map, scratch1, d8, &transition);
      __ LoadNumberAsInt32Double(
          left, d0, heap_number_map, scratch1, d8, &transition);

      if (op_ != Token::MOD) {
        Label return_heap_number;
        switch (op_) {
          case Token::ADD:
            __ vadd(d5, d0, d1);
            break;
          case Token::SUB:
            __ vsub(d5, d0, d1);
            break;
          case Token::MUL:
            __ vmul(d5, d0, d1);
            break;
          case Token::DIV:
            __ vdiv(d5, d0, d1);
            break;
          default:
            UNREACHABLE();
        }

        if (result_type_ <= BinaryOpIC::INT32) {
          __ TryDoubleToInt32Exact(scratch1, d5, d8);
          // If the ne condition is set, result does
          // not fit in a 32-bit integer.
          __ b(ne, &transition);
          // Try to tag the result as a Smi, return heap number on overflow.
          __ SmiTag(scratch1, SetCC);
          __ b(vs, &return_heap_number);
          // Check for minus zero, transition in that case (because we need
          // to return a heap number).
          Label not_zero;
          ASSERT(kSmiTag == 0);
          __ b(ne, &not_zero);
          __ VmovHigh(scratch2, d5);
          __ tst(scratch2, Operand(HeapNumber::kSignMask));
          __ b(ne, &transition);
          __ bind(&not_zero);
          __ mov(r0, scratch1);
          __ Ret();
        }

        __ bind(&return_heap_number);
        // Return a heap number, or fall through to type transition or runtime
        // call if we can't.
        // We are using vfp registers so r5 is available.
        heap_number_result = r5;
        BinaryOpStub_GenerateHeapResultAllocation(masm,
                                                  heap_number_result,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  &call_runtime,
                                                  mode_);
        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
        __ vstr(d5, r0, HeapNumber::kValueOffset);
        __ mov(r0, heap_number_result);
        __ Ret();

        // A DIV operation expecting an integer result falls through
        // to type transition.

      } else {
        if (encoded_right_arg_.has_value) {
          __ Vmov(d8, fixed_right_arg_value(), scratch1);
          __ VFPCompareAndSetFlags(d1, d8);
          __ b(ne, &transition);
        }

        // We preserved r0 and r1 to be able to call runtime.
        // Save the left value on the stack.
        __ Push(r5, r4);

        Label pop_and_call_runtime;

        // Allocate a heap number to store the result.
        heap_number_result = r5;
        BinaryOpStub_GenerateHeapResultAllocation(masm,
                                                  heap_number_result,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  &pop_and_call_runtime,
                                                  mode_);

        // Load the left value from the value saved on the stack.
        __ Pop(r1, r0);

        // Call the C function to handle the double operation.
        CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }

        __ bind(&pop_and_call_runtime);
        __ Drop(2);
        __ b(&call_runtime);
      }

      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      Label return_heap_number;
      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
      // registers r0 and r1 (right and left) are preserved for the runtime
      // call.
2041 __ LoadNumberAsInt32(left, r3, heap_number_map, 2042 scratch1, d0, d1, &transition); 2043 __ LoadNumberAsInt32(right, r2, heap_number_map, 2044 scratch1, d0, d1, &transition); 2045 2046 // The ECMA-262 standard specifies that, for shift operations, only the 2047 // 5 least significant bits of the shift value should be used. 2048 switch (op_) { 2049 case Token::BIT_OR: 2050 __ orr(r2, r3, Operand(r2)); 2051 break; 2052 case Token::BIT_XOR: 2053 __ eor(r2, r3, Operand(r2)); 2054 break; 2055 case Token::BIT_AND: 2056 __ and_(r2, r3, Operand(r2)); 2057 break; 2058 case Token::SAR: 2059 __ and_(r2, r2, Operand(0x1f)); 2060 __ mov(r2, Operand(r3, ASR, r2)); 2061 break; 2062 case Token::SHR: 2063 __ and_(r2, r2, Operand(0x1f)); 2064 __ mov(r2, Operand(r3, LSR, r2), SetCC); 2065 // SHR is special because it is required to produce a positive answer. 2066 // We only get a negative result if the shift value (r2) is 0. 2067 // This result cannot be respresented as a signed 32-bit integer, try 2068 // to return a heap number if we can. 2069 __ b(mi, (result_type_ <= BinaryOpIC::INT32) 2070 ? &transition 2071 : &return_heap_number); 2072 break; 2073 case Token::SHL: 2074 __ and_(r2, r2, Operand(0x1f)); 2075 __ mov(r2, Operand(r3, LSL, r2)); 2076 break; 2077 default: 2078 UNREACHABLE(); 2079 } 2080 2081 // Check if the result fits in a smi. If not try to return a heap number. 2082 // (We know the result is an int32). 2083 __ TrySmiTag(r0, r2, &return_heap_number); 2084 __ Ret(); 2085 2086 __ bind(&return_heap_number); 2087 heap_number_result = r5; 2088 BinaryOpStub_GenerateHeapResultAllocation(masm, 2089 heap_number_result, 2090 heap_number_map, 2091 scratch1, 2092 scratch2, 2093 &call_runtime, 2094 mode_); 2095 2096 if (op_ != Token::SHR) { 2097 // Convert the result to a floating point value. 2098 __ vmov(double_scratch.low(), r2); 2099 __ vcvt_f64_s32(double_scratch, double_scratch.low()); 2100 } else { 2101 // The result must be interpreted as an unsigned 32-bit integer. 2102 __ vmov(double_scratch.low(), r2); 2103 __ vcvt_f64_u32(double_scratch, double_scratch.low()); 2104 } 2105 2106 // Store the result. 2107 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); 2108 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); 2109 __ mov(r0, heap_number_result); 2110 __ Ret(); 2111 2112 break; 2113 } 2114 2115 default: 2116 UNREACHABLE(); 2117 } 2118 2119 // We never expect DIV to yield an integer result, so we always generate 2120 // type transition code for DIV operations expecting an integer result: the 2121 // code will fall through to this type transition. 2122 if (transition.is_linked() || 2123 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { 2124 __ bind(&transition); 2125 GenerateTypeTransition(masm); 2126 } 2127 2128 __ bind(&call_runtime); 2129 { 2130 FrameScope scope(masm, StackFrame::INTERNAL); 2131 GenerateRegisterArgsPush(masm); 2132 GenerateCallRuntime(masm); 2133 } 2134 __ Ret(); 2135 } 2136 2137 2138 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { 2139 Label call_runtime; 2140 2141 if (op_ == Token::ADD) { 2142 // Handle string addition here, because it is the only operation 2143 // that does not do a ToNumber conversion on the operands. 2144 GenerateAddStrings(masm); 2145 } 2146 2147 // Convert oddball arguments to numbers. 
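  // Only undefined is handled here: it becomes NaN for arithmetic operations
  // and +0 for bitwise/shift operations, matching ToNumber/ToInt32. All other
  // inputs are left to GenerateNumberStub below.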
2148 Label check, done; 2149 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); 2150 __ b(ne, &check); 2151 if (Token::IsBitOp(op_)) { 2152 __ mov(r1, Operand(Smi::FromInt(0))); 2153 } else { 2154 __ LoadRoot(r1, Heap::kNanValueRootIndex); 2155 } 2156 __ jmp(&done); 2157 __ bind(&check); 2158 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); 2159 __ b(ne, &done); 2160 if (Token::IsBitOp(op_)) { 2161 __ mov(r0, Operand(Smi::FromInt(0))); 2162 } else { 2163 __ LoadRoot(r0, Heap::kNanValueRootIndex); 2164 } 2165 __ bind(&done); 2166 2167 GenerateNumberStub(masm); 2168 } 2169 2170 2171 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { 2172 Label call_runtime, transition; 2173 BinaryOpStub_GenerateFPOperation( 2174 masm, left_type_, right_type_, false, 2175 &transition, &call_runtime, &transition, op_, mode_); 2176 2177 __ bind(&transition); 2178 GenerateTypeTransition(masm); 2179 2180 __ bind(&call_runtime); 2181 { 2182 FrameScope scope(masm, StackFrame::INTERNAL); 2183 GenerateRegisterArgsPush(masm); 2184 GenerateCallRuntime(masm); 2185 } 2186 __ Ret(); 2187 } 2188 2189 2190 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 2191 Label call_runtime, call_string_add_or_runtime, transition; 2192 2193 BinaryOpStub_GenerateSmiCode( 2194 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); 2195 2196 BinaryOpStub_GenerateFPOperation( 2197 masm, left_type_, right_type_, false, 2198 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); 2199 2200 __ bind(&transition); 2201 GenerateTypeTransition(masm); 2202 2203 __ bind(&call_string_add_or_runtime); 2204 if (op_ == Token::ADD) { 2205 GenerateAddStrings(masm); 2206 } 2207 2208 __ bind(&call_runtime); 2209 { 2210 FrameScope scope(masm, StackFrame::INTERNAL); 2211 GenerateRegisterArgsPush(masm); 2212 GenerateCallRuntime(masm); 2213 } 2214 __ Ret(); 2215 } 2216 2217 2218 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { 2219 ASSERT(op_ == Token::ADD); 2220 Label left_not_string, call_runtime; 2221 2222 Register left = r1; 2223 Register right = r0; 2224 2225 // Check if left argument is a string. 2226 __ JumpIfSmi(left, &left_not_string); 2227 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); 2228 __ b(ge, &left_not_string); 2229 2230 StringAddStub string_add_left_stub( 2231 (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); 2232 GenerateRegisterArgsPush(masm); 2233 __ TailCallStub(&string_add_left_stub); 2234 2235 // Left operand is not a string, test right. 2236 __ bind(&left_not_string); 2237 __ JumpIfSmi(right, &call_runtime); 2238 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); 2239 __ b(ge, &call_runtime); 2240 2241 StringAddStub string_add_right_stub( 2242 (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); 2243 GenerateRegisterArgsPush(masm); 2244 __ TailCallStub(&string_add_right_stub); 2245 2246 // At least one argument is not a string. 2247 __ bind(&call_runtime); 2248 } 2249 2250 2251 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, 2252 Register result, 2253 Register heap_number_map, 2254 Register scratch1, 2255 Register scratch2, 2256 Label* gc_required, 2257 OverwriteMode mode) { 2258 // Code below will scratch result if allocation fails. To keep both arguments 2259 // intact for the runtime call result cannot be one of these. 
2260 ASSERT(!result.is(r0) && !result.is(r1)); 2261 2262 if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { 2263 Label skip_allocation, allocated; 2264 Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; 2265 // If the overwritable operand is already an object, we skip the 2266 // allocation of a heap number. 2267 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); 2268 // Allocate a heap number for the result. 2269 __ AllocateHeapNumber( 2270 result, scratch1, scratch2, heap_number_map, gc_required); 2271 __ b(&allocated); 2272 __ bind(&skip_allocation); 2273 // Use object holding the overwritable operand for result. 2274 __ mov(result, Operand(overwritable_operand)); 2275 __ bind(&allocated); 2276 } else { 2277 ASSERT(mode == NO_OVERWRITE); 2278 __ AllocateHeapNumber( 2279 result, scratch1, scratch2, heap_number_map, gc_required); 2280 } 2281 } 2282 2283 2284 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { 2285 __ Push(r1, r0); 2286 } 2287 2288 2289 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 2290 // Untagged case: double input in d2, double result goes 2291 // into d2. 2292 // Tagged case: tagged input on top of stack and in r0, 2293 // tagged result (heap number) goes into r0. 2294 2295 Label input_not_smi; 2296 Label loaded; 2297 Label calculate; 2298 Label invalid_cache; 2299 const Register scratch0 = r9; 2300 const Register scratch1 = r7; 2301 const Register cache_entry = r0; 2302 const bool tagged = (argument_type_ == TAGGED); 2303 2304 if (tagged) { 2305 // Argument is a number and is on stack and in r0. 2306 // Load argument and check if it is a smi. 2307 __ JumpIfNotSmi(r0, &input_not_smi); 2308 2309 // Input is a smi. Convert to double and load the low and high words 2310 // of the double into r2, r3. 2311 __ SmiToDouble(d7, r0); 2312 __ vmov(r2, r3, d7); 2313 __ b(&loaded); 2314 2315 __ bind(&input_not_smi); 2316 // Check if input is a HeapNumber. 2317 __ CheckMap(r0, 2318 r1, 2319 Heap::kHeapNumberMapRootIndex, 2320 &calculate, 2321 DONT_DO_SMI_CHECK); 2322 // Input is a HeapNumber. Load it to a double register and store the 2323 // low and high words into r2, r3. 2324 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); 2325 __ vmov(r2, r3, d0); 2326 } else { 2327 // Input is untagged double in d2. Output goes to d2. 2328 __ vmov(r2, r3, d2); 2329 } 2330 __ bind(&loaded); 2331 // r2 = low 32 bits of double value 2332 // r3 = high 32 bits of double value 2333 // Compute hash (the shifts are arithmetic): 2334 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); 2335 __ eor(r1, r2, Operand(r3)); 2336 __ eor(r1, r1, Operand(r1, ASR, 16)); 2337 __ eor(r1, r1, Operand(r1, ASR, 8)); 2338 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); 2339 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); 2340 2341 // r2 = low 32 bits of double value. 2342 // r3 = high 32 bits of double value. 2343 // r1 = TranscendentalCache::hash(double value). 2344 Isolate* isolate = masm->isolate(); 2345 ExternalReference cache_array = 2346 ExternalReference::transcendental_cache_array_address(isolate); 2347 __ mov(cache_entry, Operand(cache_array)); 2348 // cache_entry points to cache array. 2349 int cache_array_index 2350 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); 2351 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); 2352 // r0 points to the cache for the type type_. 2353 // If NULL, the cache hasn't been initialized yet, so go through runtime. 
2354 __ cmp(cache_entry, Operand::Zero()); 2355 __ b(eq, &invalid_cache); 2356 2357 #ifdef DEBUG 2358 // Check that the layout of cache elements match expectations. 2359 { TranscendentalCache::SubCache::Element test_elem[2]; 2360 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); 2361 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); 2362 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); 2363 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); 2364 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); 2365 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. 2366 CHECK_EQ(0, elem_in0 - elem_start); 2367 CHECK_EQ(kIntSize, elem_in1 - elem_start); 2368 CHECK_EQ(2 * kIntSize, elem_out - elem_start); 2369 } 2370 #endif 2371 2372 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. 2373 __ add(r1, r1, Operand(r1, LSL, 1)); 2374 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); 2375 // Check if cache matches: Double value is stored in uint32_t[2] array. 2376 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); 2377 __ cmp(r2, r4); 2378 __ cmp(r3, r5, eq); 2379 __ b(ne, &calculate); 2380 // Cache hit. Load result, cleanup and return. 2381 Counters* counters = masm->isolate()->counters(); 2382 __ IncrementCounter( 2383 counters->transcendental_cache_hit(), 1, scratch0, scratch1); 2384 if (tagged) { 2385 // Pop input value from stack and load result into r0. 2386 __ pop(); 2387 __ mov(r0, Operand(r6)); 2388 } else { 2389 // Load result into d2. 2390 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); 2391 } 2392 __ Ret(); 2393 2394 __ bind(&calculate); 2395 __ IncrementCounter( 2396 counters->transcendental_cache_miss(), 1, scratch0, scratch1); 2397 if (tagged) { 2398 __ bind(&invalid_cache); 2399 ExternalReference runtime_function = 2400 ExternalReference(RuntimeFunction(), masm->isolate()); 2401 __ TailCallExternalReference(runtime_function, 1, 1); 2402 } else { 2403 Label no_update; 2404 Label skip_cache; 2405 2406 // Call C function to calculate the result and update the cache. 2407 // r0: precalculated cache entry address. 2408 // r2 and r3: parts of the double value. 2409 // Store r0, r2 and r3 on stack for later before calling C function. 2410 __ Push(r3, r2, cache_entry); 2411 GenerateCallCFunction(masm, scratch0); 2412 __ GetCFunctionDoubleResult(d2); 2413 2414 // Try to update the cache. If we cannot allocate a 2415 // heap number, we return the result without updating. 2416 __ Pop(r3, r2, cache_entry); 2417 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); 2418 __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); 2419 __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); 2420 __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); 2421 __ Ret(); 2422 2423 __ bind(&invalid_cache); 2424 // The cache is invalid. Call runtime which will recreate the 2425 // cache. 2426 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); 2427 __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); 2428 __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); 2429 { 2430 FrameScope scope(masm, StackFrame::INTERNAL); 2431 __ push(r0); 2432 __ CallRuntime(RuntimeFunction(), 1); 2433 } 2434 __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); 2435 __ Ret(); 2436 2437 __ bind(&skip_cache); 2438 // Call C function to calculate the result and answer directly 2439 // without updating the cache. 
2440 GenerateCallCFunction(masm, scratch0); 2441 __ GetCFunctionDoubleResult(d2); 2442 __ bind(&no_update); 2443 2444 // We return the value in d2 without adding it to the cache, but 2445 // we cause a scavenging GC so that future allocations will succeed. 2446 { 2447 FrameScope scope(masm, StackFrame::INTERNAL); 2448 2449 // Allocate an aligned object larger than a HeapNumber. 2450 ASSERT(4 * kPointerSize >= HeapNumber::kSize); 2451 __ mov(scratch0, Operand(4 * kPointerSize)); 2452 __ push(scratch0); 2453 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 2454 } 2455 __ Ret(); 2456 } 2457 } 2458 2459 2460 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, 2461 Register scratch) { 2462 Isolate* isolate = masm->isolate(); 2463 2464 __ push(lr); 2465 __ PrepareCallCFunction(0, 1, scratch); 2466 if (masm->use_eabi_hardfloat()) { 2467 __ vmov(d0, d2); 2468 } else { 2469 __ vmov(r0, r1, d2); 2470 } 2471 AllowExternalCallThatCantCauseGC scope(masm); 2472 switch (type_) { 2473 case TranscendentalCache::SIN: 2474 __ CallCFunction(ExternalReference::math_sin_double_function(isolate), 2475 0, 1); 2476 break; 2477 case TranscendentalCache::COS: 2478 __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 2479 0, 1); 2480 break; 2481 case TranscendentalCache::TAN: 2482 __ CallCFunction(ExternalReference::math_tan_double_function(isolate), 2483 0, 1); 2484 break; 2485 case TranscendentalCache::LOG: 2486 __ CallCFunction(ExternalReference::math_log_double_function(isolate), 2487 0, 1); 2488 break; 2489 default: 2490 UNIMPLEMENTED(); 2491 break; 2492 } 2493 __ pop(lr); 2494 } 2495 2496 2497 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 2498 switch (type_) { 2499 // Add more cases when necessary. 2500 case TranscendentalCache::SIN: return Runtime::kMath_sin; 2501 case TranscendentalCache::COS: return Runtime::kMath_cos; 2502 case TranscendentalCache::TAN: return Runtime::kMath_tan; 2503 case TranscendentalCache::LOG: return Runtime::kMath_log; 2504 default: 2505 UNIMPLEMENTED(); 2506 return Runtime::kAbort; 2507 } 2508 } 2509 2510 2511 void StackCheckStub::Generate(MacroAssembler* masm) { 2512 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); 2513 } 2514 2515 2516 void InterruptStub::Generate(MacroAssembler* masm) { 2517 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); 2518 } 2519 2520 2521 void MathPowStub::Generate(MacroAssembler* masm) { 2522 const Register base = r1; 2523 const Register exponent = r2; 2524 const Register heapnumbermap = r5; 2525 const Register heapnumber = r0; 2526 const DwVfpRegister double_base = d1; 2527 const DwVfpRegister double_exponent = d2; 2528 const DwVfpRegister double_result = d3; 2529 const DwVfpRegister double_scratch = d0; 2530 const SwVfpRegister single_scratch = s0; 2531 const Register scratch = r9; 2532 const Register scratch2 = r7; 2533 2534 Label call_runtime, done, int_exponent; 2535 if (exponent_type_ == ON_STACK) { 2536 Label base_is_smi, unpack_exponent; 2537 // The exponent and base are supplied as arguments on the stack. 2538 // This can only happen if the stub is called from non-optimized code. 2539 // Load input parameters from stack to double registers. 
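    // The exponent was pushed last, so it is read from sp[0] and the base from
    // the slot above it.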
    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);

    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ vmov(single_scratch, scratch);
    __ vcvt_f64_s32(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);
    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ vcvt_u32_f64(single_scratch, double_exponent);
    // We do not check for NaN or Infinity here because comparing numbers on
    // ARM correctly distinguishes NaNs. We end up calling the built-in.
    __ vcvt_f64_u32(double_scratch, single_scratch);
    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
    __ b(eq, &int_exponent_convert);

    if (exponent_type_ == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ vmov(double_scratch, 0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &not_plus_half);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vneg(double_result, double_scratch, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vsqrt(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ vmov(double_scratch, -0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &call_runtime);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vmov(double_result, kDoubleRegZero, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
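      // x^-0.5 is computed below as 1/sqrt(x); the +0 addition normalizes -0
      // so that, per the spec, Math.pow(-0, -0.5) ends up as +Infinity rather
      // than -Infinity (sqrt(-0) would be -0).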
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vmov(double_result, 1.0, scratch);
      __ vsqrt(double_scratch, double_scratch);
      __ vdiv(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(lr);
    __ GetCFunctionDoubleResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
    __ vcvt_u32_f64(single_scratch, double_exponent);
    __ vmov(scratch, single_scratch);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type_ == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }
  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, 1.0, scratch2);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand::Zero());
  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
  __ sub(scratch, scratch2, scratch, LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);

  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, 1.0, scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with the exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
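    // The Ret(2) below also drops the two tagged arguments from the stack.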
2691 __ bind(&done); 2692 __ AllocateHeapNumber( 2693 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); 2694 __ vstr(double_result, 2695 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); 2696 ASSERT(heapnumber.is(r0)); 2697 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); 2698 __ Ret(2); 2699 } else { 2700 __ push(lr); 2701 { 2702 AllowExternalCallThatCantCauseGC scope(masm); 2703 __ PrepareCallCFunction(0, 2, scratch); 2704 __ SetCallCDoubleArguments(double_base, double_exponent); 2705 __ CallCFunction( 2706 ExternalReference::power_double_double_function(masm->isolate()), 2707 0, 2); 2708 } 2709 __ pop(lr); 2710 __ GetCFunctionDoubleResult(double_result); 2711 2712 __ bind(&done); 2713 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); 2714 __ Ret(); 2715 } 2716 } 2717 2718 2719 bool CEntryStub::NeedsImmovableCode() { 2720 return true; 2721 } 2722 2723 2724 bool CEntryStub::IsPregenerated() { 2725 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && 2726 result_size_ == 1; 2727 } 2728 2729 2730 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { 2731 CEntryStub::GenerateAheadOfTime(isolate); 2732 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); 2733 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); 2734 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); 2735 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); 2736 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); 2737 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); 2738 } 2739 2740 2741 void CodeStub::GenerateFPStubs(Isolate* isolate) { 2742 SaveFPRegsMode mode = kSaveFPRegs; 2743 CEntryStub save_doubles(1, mode); 2744 StoreBufferOverflowStub stub(mode); 2745 // These stubs might already be in the snapshot, detect that and don't 2746 // regenerate, which would lead to code stub initialization state being messed 2747 // up. 2748 Code* save_doubles_code; 2749 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { 2750 save_doubles_code = *save_doubles.GetCode(isolate); 2751 } 2752 Code* store_buffer_overflow_code; 2753 if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) { 2754 store_buffer_overflow_code = *stub.GetCode(isolate); 2755 } 2756 save_doubles_code->set_is_pregenerated(true); 2757 store_buffer_overflow_code->set_is_pregenerated(true); 2758 isolate->set_fp_stubs_generated(true); 2759 } 2760 2761 2762 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { 2763 CEntryStub stub(1, kDontSaveFPRegs); 2764 Handle<Code> code = stub.GetCode(isolate); 2765 code->set_is_pregenerated(true); 2766 } 2767 2768 2769 static void JumpIfOOM(MacroAssembler* masm, 2770 Register value, 2771 Register scratch, 2772 Label* oom_label) { 2773 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); 2774 STATIC_ASSERT(kFailureTag == 3); 2775 __ and_(scratch, value, Operand(0xf)); 2776 __ cmp(scratch, Operand(0xf)); 2777 __ b(eq, oom_label); 2778 } 2779 2780 2781 void CEntryStub::GenerateCore(MacroAssembler* masm, 2782 Label* throw_normal_exception, 2783 Label* throw_termination_exception, 2784 Label* throw_out_of_memory_exception, 2785 bool do_gc, 2786 bool always_allocate) { 2787 // r0: result parameter for PerformGC, if any 2788 // r4: number of arguments including receiver (C callee-saved) 2789 // r5: pointer to builtin function (C callee-saved) 2790 // r6: pointer to the first argument (C callee-saved) 2791 Isolate* isolate = masm->isolate(); 2792 2793 if (do_gc) { 2794 // Passing r0. 
2795 __ PrepareCallCFunction(1, 0, r1); 2796 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2797 1, 0); 2798 } 2799 2800 ExternalReference scope_depth = 2801 ExternalReference::heap_always_allocate_scope_depth(isolate); 2802 if (always_allocate) { 2803 __ mov(r0, Operand(scope_depth)); 2804 __ ldr(r1, MemOperand(r0)); 2805 __ add(r1, r1, Operand(1)); 2806 __ str(r1, MemOperand(r0)); 2807 } 2808 2809 // Call C built-in. 2810 // r0 = argc, r1 = argv 2811 __ mov(r0, Operand(r4)); 2812 __ mov(r1, Operand(r6)); 2813 2814 #if V8_HOST_ARCH_ARM 2815 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); 2816 int frame_alignment_mask = frame_alignment - 1; 2817 if (FLAG_debug_code) { 2818 if (frame_alignment > kPointerSize) { 2819 Label alignment_as_expected; 2820 ASSERT(IsPowerOf2(frame_alignment)); 2821 __ tst(sp, Operand(frame_alignment_mask)); 2822 __ b(eq, &alignment_as_expected); 2823 // Don't use Check here, as it will call Runtime_Abort re-entering here. 2824 __ stop("Unexpected alignment"); 2825 __ bind(&alignment_as_expected); 2826 } 2827 } 2828 #endif 2829 2830 __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); 2831 2832 // To let the GC traverse the return address of the exit frames, we need to 2833 // know where the return address is. The CEntryStub is unmovable, so 2834 // we can store the address on the stack to be able to find it again and 2835 // we never have to restore it, because it will not change. 2836 // Compute the return address in lr to return to after the jump below. Pc is 2837 // already at '+ 8' from the current instruction but return is after three 2838 // instructions so add another 4 to pc to get the return address. 2839 { 2840 // Prevent literal pool emission before return address. 2841 Assembler::BlockConstPoolScope block_const_pool(masm); 2842 masm->add(lr, pc, Operand(4)); 2843 __ str(lr, MemOperand(sp, 0)); 2844 masm->Jump(r5); 2845 } 2846 2847 __ VFPEnsureFPSCRState(r2); 2848 2849 if (always_allocate) { 2850 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 2851 // though (contain the result). 2852 __ mov(r2, Operand(scope_depth)); 2853 __ ldr(r3, MemOperand(r2)); 2854 __ sub(r3, r3, Operand(1)); 2855 __ str(r3, MemOperand(r2)); 2856 } 2857 2858 // check for failure result 2859 Label failure_returned; 2860 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); 2861 // Lower 2 bits of r2 are 0 iff r0 has failure tag. 2862 __ add(r2, r0, Operand(1)); 2863 __ tst(r2, Operand(kFailureTagMask)); 2864 __ b(eq, &failure_returned); 2865 2866 // Exit C frame and return. 2867 // r0:r1: result 2868 // sp: stack pointer 2869 // fp: frame pointer 2870 // Callee-saved register r4 still holds argc. 2871 __ LeaveExitFrame(save_doubles_, r4); 2872 __ mov(pc, lr); 2873 2874 // check if we should retry or throw exception 2875 Label retry; 2876 __ bind(&failure_returned); 2877 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); 2878 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); 2879 __ b(eq, &retry); 2880 2881 // Special handling of out of memory exceptions. 2882 JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); 2883 2884 // Retrieve the pending exception. 2885 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 2886 isolate))); 2887 __ ldr(r0, MemOperand(ip)); 2888 2889 // See if we just retrieved an OOM exception. 2890 JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); 2891 2892 // Clear the pending exception. 
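  // The hole value acts as the "no pending exception" sentinel.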
2893 __ mov(r3, Operand(isolate->factory()->the_hole_value())); 2894 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 2895 isolate))); 2896 __ str(r3, MemOperand(ip)); 2897 2898 // Special handling of termination exceptions which are uncatchable 2899 // by javascript code. 2900 __ cmp(r0, Operand(isolate->factory()->termination_exception())); 2901 __ b(eq, throw_termination_exception); 2902 2903 // Handle normal exception. 2904 __ jmp(throw_normal_exception); 2905 2906 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying 2907 } 2908 2909 2910 void CEntryStub::Generate(MacroAssembler* masm) { 2911 // Called from JavaScript; parameters are on stack as if calling JS function 2912 // r0: number of arguments including receiver 2913 // r1: pointer to builtin function 2914 // fp: frame pointer (restored after C call) 2915 // sp: stack pointer (restored as callee's sp after C call) 2916 // cp: current context (C callee-saved) 2917 2918 ProfileEntryHookStub::MaybeCallEntryHook(masm); 2919 2920 // Result returned in r0 or r0+r1 by default. 2921 2922 // NOTE: Invocations of builtins may return failure objects 2923 // instead of a proper result. The builtin entry handles 2924 // this by performing a garbage collection and retrying the 2925 // builtin once. 2926 2927 // Compute the argv pointer in a callee-saved register. 2928 __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); 2929 __ sub(r6, r6, Operand(kPointerSize)); 2930 2931 // Enter the exit frame that transitions from JavaScript to C++. 2932 FrameScope scope(masm, StackFrame::MANUAL); 2933 __ EnterExitFrame(save_doubles_); 2934 2935 // Set up argc and the builtin function in callee-saved registers. 2936 __ mov(r4, Operand(r0)); 2937 __ mov(r5, Operand(r1)); 2938 2939 // r4: number of arguments (C callee-saved) 2940 // r5: pointer to builtin function (C callee-saved) 2941 // r6: pointer to first argument (C callee-saved) 2942 2943 Label throw_normal_exception; 2944 Label throw_termination_exception; 2945 Label throw_out_of_memory_exception; 2946 2947 // Call into the runtime system. 2948 GenerateCore(masm, 2949 &throw_normal_exception, 2950 &throw_termination_exception, 2951 &throw_out_of_memory_exception, 2952 false, 2953 false); 2954 2955 // Do space-specific GC and retry runtime call. 2956 GenerateCore(masm, 2957 &throw_normal_exception, 2958 &throw_termination_exception, 2959 &throw_out_of_memory_exception, 2960 true, 2961 false); 2962 2963 // Do full GC and retry runtime call one final time. 2964 Failure* failure = Failure::InternalError(); 2965 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); 2966 GenerateCore(masm, 2967 &throw_normal_exception, 2968 &throw_termination_exception, 2969 &throw_out_of_memory_exception, 2970 true, 2971 true); 2972 2973 __ bind(&throw_out_of_memory_exception); 2974 // Set external caught exception to false. 2975 Isolate* isolate = masm->isolate(); 2976 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, 2977 isolate); 2978 __ mov(r0, Operand(false, RelocInfo::NONE32)); 2979 __ mov(r2, Operand(external_caught)); 2980 __ str(r0, MemOperand(r2)); 2981 2982 // Set pending exception and r0 to out of memory exception. 
2983 Label already_have_failure; 2984 JumpIfOOM(masm, r0, ip, &already_have_failure); 2985 Failure* out_of_memory = Failure::OutOfMemoryException(0x1); 2986 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); 2987 __ bind(&already_have_failure); 2988 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 2989 isolate))); 2990 __ str(r0, MemOperand(r2)); 2991 // Fall through to the next label. 2992 2993 __ bind(&throw_termination_exception); 2994 __ ThrowUncatchable(r0); 2995 2996 __ bind(&throw_normal_exception); 2997 __ Throw(r0); 2998 } 2999 3000 3001 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { 3002 // r0: code entry 3003 // r1: function 3004 // r2: receiver 3005 // r3: argc 3006 // [sp+0]: argv 3007 3008 Label invoke, handler_entry, exit; 3009 3010 ProfileEntryHookStub::MaybeCallEntryHook(masm); 3011 3012 // Called from C, so do not pop argc and args on exit (preserve sp) 3013 // No need to save register-passed args 3014 // Save callee-saved registers (incl. cp and fp), sp, and lr 3015 __ stm(db_w, sp, kCalleeSaved | lr.bit()); 3016 3017 // Save callee-saved vfp registers. 3018 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); 3019 // Set up the reserved register for 0.0. 3020 __ vmov(kDoubleRegZero, 0.0); 3021 __ VFPEnsureFPSCRState(r4); 3022 3023 // Get address of argv, see stm above. 3024 // r0: code entry 3025 // r1: function 3026 // r2: receiver 3027 // r3: argc 3028 3029 // Set up argv in r4. 3030 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; 3031 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; 3032 __ ldr(r4, MemOperand(sp, offset_to_argv)); 3033 3034 // Push a frame with special values setup to mark it as an entry frame. 3035 // r0: code entry 3036 // r1: function 3037 // r2: receiver 3038 // r3: argc 3039 // r4: argv 3040 Isolate* isolate = masm->isolate(); 3041 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. 3042 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; 3043 __ mov(r7, Operand(Smi::FromInt(marker))); 3044 __ mov(r6, Operand(Smi::FromInt(marker))); 3045 __ mov(r5, 3046 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); 3047 __ ldr(r5, MemOperand(r5)); 3048 __ Push(r8, r7, r6, r5); 3049 3050 // Set up frame pointer for the frame to be pushed. 3051 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); 3052 3053 // If this is the outermost JS call, set js_entry_sp value. 3054 Label non_outermost_js; 3055 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); 3056 __ mov(r5, Operand(ExternalReference(js_entry_sp))); 3057 __ ldr(r6, MemOperand(r5)); 3058 __ cmp(r6, Operand::Zero()); 3059 __ b(ne, &non_outermost_js); 3060 __ str(fp, MemOperand(r5)); 3061 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); 3062 Label cont; 3063 __ b(&cont); 3064 __ bind(&non_outermost_js); 3065 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); 3066 __ bind(&cont); 3067 __ push(ip); 3068 3069 // Jump to a faked try block that does the invoke, with a faked catch 3070 // block that sets the pending exception. 3071 __ jmp(&invoke); 3072 3073 // Block literal pool emission whilst taking the position of the handler 3074 // entry. This avoids making the assumption that literal pools are always 3075 // emitted after an instruction is emitted, rather than before. 
3076 { 3077 Assembler::BlockConstPoolScope block_const_pool(masm); 3078 __ bind(&handler_entry); 3079 handler_offset_ = handler_entry.pos(); 3080 // Caught exception: Store result (exception) in the pending exception 3081 // field in the JSEnv and return a failure sentinel. Coming in here the 3082 // fp will be invalid because the PushTryHandler below sets it to 0 to 3083 // signal the existence of the JSEntry frame. 3084 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3085 isolate))); 3086 } 3087 __ str(r0, MemOperand(ip)); 3088 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); 3089 __ b(&exit); 3090 3091 // Invoke: Link this frame into the handler chain. There's only one 3092 // handler block in this code object, so its index is 0. 3093 __ bind(&invoke); 3094 // Must preserve r0-r4, r5-r7 are available. 3095 __ PushTryHandler(StackHandler::JS_ENTRY, 0); 3096 // If an exception not caught by another handler occurs, this handler 3097 // returns control to the code after the bl(&invoke) above, which 3098 // restores all kCalleeSaved registers (including cp and fp) to their 3099 // saved values before returning a failure to C. 3100 3101 // Clear any pending exceptions. 3102 __ mov(r5, Operand(isolate->factory()->the_hole_value())); 3103 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3104 isolate))); 3105 __ str(r5, MemOperand(ip)); 3106 3107 // Invoke the function by calling through JS entry trampoline builtin. 3108 // Notice that we cannot store a reference to the trampoline code directly in 3109 // this stub, because runtime stubs are not traversed when doing GC. 3110 3111 // Expected registers by Builtins::JSEntryTrampoline 3112 // r0: code entry 3113 // r1: function 3114 // r2: receiver 3115 // r3: argc 3116 // r4: argv 3117 if (is_construct) { 3118 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, 3119 isolate); 3120 __ mov(ip, Operand(construct_entry)); 3121 } else { 3122 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); 3123 __ mov(ip, Operand(entry)); 3124 } 3125 __ ldr(ip, MemOperand(ip)); // deref address 3126 3127 // Branch and link to JSEntryTrampoline. We don't use the double underscore 3128 // macro for the add instruction because we don't want the coverage tool 3129 // inserting instructions here after we read the pc. We block literal pool 3130 // emission for the same reason. 3131 { 3132 Assembler::BlockConstPoolScope block_const_pool(masm); 3133 __ mov(lr, Operand(pc)); 3134 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); 3135 } 3136 3137 // Unlink this frame from the handler chain. 3138 __ PopTryHandler(); 3139 3140 __ bind(&exit); // r0 holds result 3141 // Check if the current stack frame is marked as the outermost JS frame. 3142 Label non_outermost_js_2; 3143 __ pop(r5); 3144 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); 3145 __ b(ne, &non_outermost_js_2); 3146 __ mov(r6, Operand::Zero()); 3147 __ mov(r5, Operand(ExternalReference(js_entry_sp))); 3148 __ str(r6, MemOperand(r5)); 3149 __ bind(&non_outermost_js_2); 3150 3151 // Restore the top frame descriptors from the stack. 3152 __ pop(r3); 3153 __ mov(ip, 3154 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); 3155 __ str(r3, MemOperand(ip)); 3156 3157 // Reset the stack to the callee saved registers. 3158 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); 3159 3160 // Restore callee-saved registers and return. 
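  // In debug builds lr is clobbered just below; the actual return goes through
  // the pc value restored by the final ldm, so nothing may rely on lr here.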
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif

  // Restore callee-saved vfp registers.
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


// Uses registers r0 to r4.
// Expected input (depending on whether args are in registers or on the stack):
// * object: r0 or at sp + 1 * kPointerSize.
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
// In this case the offset to the inline site to patch is passed on the stack,
// in the safepoint slot for register r4.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
  // ReturnTrueFalse is only implemented for inlined call sites.
  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub:
  const Register object = r0;  // Object (lhs).
  Register map = r3;  // Map of the object.
  const Register function = r1;  // Function (rhs).
  const Register prototype = r4;  // Prototype of the function.
  const Register inline_site = r9;
  const Register scratch = r2;

  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;

  Label slow, loop, is_instance, is_not_instance, not_js_object;

  if (!HasArgsInRegisters()) {
    __ ldr(object, MemOperand(sp, 1 * kPointerSize));
    __ ldr(function, MemOperand(sp, 0));
  }

  // Check that the left hand is a JS object and load map.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache don't look in the global cache, but do the
  // real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck()) {
    Label miss;
    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ b(ne, &miss);
    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
    __ b(ne, &miss);
    __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
    __ Ret(HasArgsInRegisters() ? 0 : 2);

    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
  } else {
    ASSERT(HasArgsInRegisters());
    // Patch the (relocated) inlined map check.

    // The offset was stored in r4 safepoint slot.
    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
    __ LoadFromSafepointRegisterSlot(scratch, r4);
    __ sub(inline_site, lr, scratch);
    // Get the map location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ ldr(scratch, MemOperand(scratch));
    __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
  }

  // Register mapping: r3 is object map and r4 is function prototype.
  // Get prototype of object into r2.
  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));

  // We don't need map any more. Use it as a scratch register.
  Register scratch2 = map;
  map = no_reg;

  // Loop through the prototype chain looking for the function prototype.
  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
  __ bind(&loop);
  __ cmp(scratch, Operand(prototype));
  __ b(eq, &is_instance);
  __ cmp(scratch, scratch2);
  __ b(eq, &is_not_instance);
  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
  __ jmp(&loop);

  __ bind(&is_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(r0, Operand(Smi::FromInt(0)));
    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Patch the call site to return true.
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ str(r0, MemOperand(scratch));

    if (!ReturnTrueFalseObject()) {
      __ mov(r0, Operand(Smi::FromInt(0)));
    }
  }
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(r0, Operand(Smi::FromInt(1)));
    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Patch the call site to return false.
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ str(r0, MemOperand(scratch));

    if (!ReturnTrueFalseObject()) {
      __ mov(r0, Operand(Smi::FromInt(1)));
    }
  }
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before null, smi and string value checks, check that the rhs is a function
  // as for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow);
  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  // Null is not instance of anything.
  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
  __ b(ne, &object_not_null);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null);
  // Smi values are not instances of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null_or_smi);
  // String values are not instances of anything.
  __ IsObjectJSStringType(object, scratch, &slow);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  // Slow-case. Tail call builtin.
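  // The INSTANCE_OF builtin returns Smi 0 for "is an instance"; the boolean
  // path below maps that to the true object and anything else to false.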
3332 __ bind(&slow); 3333 if (!ReturnTrueFalseObject()) { 3334 if (HasArgsInRegisters()) { 3335 __ Push(r0, r1); 3336 } 3337 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 3338 } else { 3339 { 3340 FrameScope scope(masm, StackFrame::INTERNAL); 3341 __ Push(r0, r1); 3342 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); 3343 } 3344 __ cmp(r0, Operand::Zero()); 3345 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); 3346 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); 3347 __ Ret(HasArgsInRegisters() ? 0 : 2); 3348 } 3349 } 3350 3351 3352 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { 3353 Label miss; 3354 Register receiver; 3355 if (kind() == Code::KEYED_LOAD_IC) { 3356 // ----------- S t a t e ------------- 3357 // -- lr : return address 3358 // -- r0 : key 3359 // -- r1 : receiver 3360 // ----------------------------------- 3361 __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string())); 3362 __ b(ne, &miss); 3363 receiver = r1; 3364 } else { 3365 ASSERT(kind() == Code::LOAD_IC); 3366 // ----------- S t a t e ------------- 3367 // -- r2 : name 3368 // -- lr : return address 3369 // -- r0 : receiver 3370 // -- sp[0] : receiver 3371 // ----------------------------------- 3372 receiver = r0; 3373 } 3374 3375 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss); 3376 __ bind(&miss); 3377 StubCompiler::TailCallBuiltin( 3378 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); 3379 } 3380 3381 3382 void StringLengthStub::Generate(MacroAssembler* masm) { 3383 Label miss; 3384 Register receiver; 3385 if (kind() == Code::KEYED_LOAD_IC) { 3386 // ----------- S t a t e ------------- 3387 // -- lr : return address 3388 // -- r0 : key 3389 // -- r1 : receiver 3390 // ----------------------------------- 3391 __ cmp(r0, Operand(masm->isolate()->factory()->length_string())); 3392 __ b(ne, &miss); 3393 receiver = r1; 3394 } else { 3395 ASSERT(kind() == Code::LOAD_IC); 3396 // ----------- S t a t e ------------- 3397 // -- r2 : name 3398 // -- lr : return address 3399 // -- r0 : receiver 3400 // -- sp[0] : receiver 3401 // ----------------------------------- 3402 receiver = r0; 3403 } 3404 3405 StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss, 3406 support_wrapper_); 3407 3408 __ bind(&miss); 3409 StubCompiler::TailCallBuiltin( 3410 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); 3411 } 3412 3413 3414 void StoreArrayLengthStub::Generate(MacroAssembler* masm) { 3415 // This accepts as a receiver anything JSArray::SetElementsLength accepts 3416 // (currently anything except for external arrays which means anything with 3417 // elements of FixedArray type). Value must be a number, but only smis are 3418 // accepted as the most common case. 3419 Label miss; 3420 3421 Register receiver; 3422 Register value; 3423 if (kind() == Code::KEYED_STORE_IC) { 3424 // ----------- S t a t e ------------- 3425 // -- lr : return address 3426 // -- r0 : value 3427 // -- r1 : key 3428 // -- r2 : receiver 3429 // ----------------------------------- 3430 __ cmp(r1, Operand(masm->isolate()->factory()->length_string())); 3431 __ b(ne, &miss); 3432 receiver = r2; 3433 value = r0; 3434 } else { 3435 ASSERT(kind() == Code::STORE_IC); 3436 // ----------- S t a t e ------------- 3437 // -- lr : return address 3438 // -- r0 : value 3439 // -- r1 : receiver 3440 // -- r2 : key 3441 // ----------------------------------- 3442 receiver = r1; 3443 value = r0; 3444 } 3445 Register scratch = r3; 3446 3447 // Check that the receiver isn't a smi. 
3448 __ JumpIfSmi(receiver, &miss); 3449 3450 // Check that the object is a JS array. 3451 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); 3452 __ b(ne, &miss); 3453 3454 // Check that elements are FixedArray. 3455 // We rely on StoreIC_ArrayLength below to deal with all types of 3456 // fast elements (including COW). 3457 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); 3458 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); 3459 __ b(ne, &miss); 3460 3461 // Check that the array has fast properties, otherwise the length 3462 // property might have been redefined. 3463 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); 3464 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); 3465 __ CompareRoot(scratch, Heap::kHashTableMapRootIndex); 3466 __ b(eq, &miss); 3467 3468 // Check that value is a smi. 3469 __ JumpIfNotSmi(value, &miss); 3470 3471 // Prepare tail call to StoreIC_ArrayLength. 3472 __ Push(receiver, value); 3473 3474 ExternalReference ref = 3475 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); 3476 __ TailCallExternalReference(ref, 2, 1); 3477 3478 __ bind(&miss); 3479 3480 StubCompiler::TailCallBuiltin( 3481 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); 3482 } 3483 3484 3485 Register InstanceofStub::left() { return r0; } 3486 3487 3488 Register InstanceofStub::right() { return r1; } 3489 3490 3491 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { 3492 // The displacement is the offset of the last parameter (if any) 3493 // relative to the frame pointer. 3494 const int kDisplacement = 3495 StandardFrameConstants::kCallerSPOffset - kPointerSize; 3496 3497 // Check that the key is a smi. 3498 Label slow; 3499 __ JumpIfNotSmi(r1, &slow); 3500 3501 // Check if the calling frame is an arguments adaptor frame. 3502 Label adaptor; 3503 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3504 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); 3505 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3506 __ b(eq, &adaptor); 3507 3508 // Check index against formal parameters count limit passed in 3509 // through register r0. Use unsigned comparison to get negative 3510 // check for free. 3511 __ cmp(r1, r0); 3512 __ b(hs, &slow); 3513 3514 // Read the argument from the stack and return it. 3515 __ sub(r3, r0, r1); 3516 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3)); 3517 __ ldr(r0, MemOperand(r3, kDisplacement)); 3518 __ Jump(lr); 3519 3520 // Arguments adaptor case: Check index against actual arguments 3521 // limit found in the arguments adaptor frame. Use unsigned 3522 // comparison to get negative check for free. 3523 __ bind(&adaptor); 3524 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3525 __ cmp(r1, r0); 3526 __ b(cs, &slow); 3527 3528 // Read the argument from the adaptor frame and return it. 3529 __ sub(r3, r0, r1); 3530 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3)); 3531 __ ldr(r0, MemOperand(r3, kDisplacement)); 3532 __ Jump(lr); 3533 3534 // Slow-case: Handle non-smi or out-of-bounds access to arguments 3535 // by calling the runtime system. 
3536 __ bind(&slow); 3537 __ push(r1); 3538 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); 3539 } 3540 3541 3542 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { 3543 // sp[0] : number of parameters 3544 // sp[4] : receiver displacement 3545 // sp[8] : function 3546 3547 // Check if the calling frame is an arguments adaptor frame. 3548 Label runtime; 3549 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3550 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); 3551 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3552 __ b(ne, &runtime); 3553 3554 // Patch the arguments.length and the parameters pointer in the current frame. 3555 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3556 __ str(r2, MemOperand(sp, 0 * kPointerSize)); 3557 __ add(r3, r3, Operand(r2, LSL, 1)); 3558 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); 3559 __ str(r3, MemOperand(sp, 1 * kPointerSize)); 3560 3561 __ bind(&runtime); 3562 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 3563 } 3564 3565 3566 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { 3567 // Stack layout: 3568 // sp[0] : number of parameters (tagged) 3569 // sp[4] : address of receiver argument 3570 // sp[8] : function 3571 // Registers used over whole function: 3572 // r6 : allocated object (tagged) 3573 // r9 : mapped parameter count (tagged) 3574 3575 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); 3576 // r1 = parameter count (tagged) 3577 3578 // Check if the calling frame is an arguments adaptor frame. 3579 Label runtime; 3580 Label adaptor_frame, try_allocate; 3581 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3582 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); 3583 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3584 __ b(eq, &adaptor_frame); 3585 3586 // No adaptor, parameter count = argument count. 3587 __ mov(r2, r1); 3588 __ b(&try_allocate); 3589 3590 // We have an adaptor frame. Patch the parameters pointer. 3591 __ bind(&adaptor_frame); 3592 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3593 __ add(r3, r3, Operand(r2, LSL, 1)); 3594 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); 3595 __ str(r3, MemOperand(sp, 1 * kPointerSize)); 3596 3597 // r1 = parameter count (tagged) 3598 // r2 = argument count (tagged) 3599 // Compute the mapped parameter count = min(r1, r2) in r1. 3600 __ cmp(r1, Operand(r2)); 3601 __ mov(r1, Operand(r2), LeaveCC, gt); 3602 3603 __ bind(&try_allocate); 3604 3605 // Compute the sizes of backing store, parameter map, and arguments object. 3606 // 1. Parameter map, has 2 extra words containing context and backing store. 3607 const int kParameterMapHeaderSize = 3608 FixedArray::kHeaderSize + 2 * kPointerSize; 3609 // If there are no mapped parameters, we do not need the parameter_map. 3610 __ cmp(r1, Operand(Smi::FromInt(0))); 3611 __ mov(r9, Operand::Zero(), LeaveCC, eq); 3612 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne); 3613 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne); 3614 3615 // 2. Backing store. 3616 __ add(r9, r9, Operand(r2, LSL, 1)); 3617 __ add(r9, r9, Operand(FixedArray::kHeaderSize)); 3618 3619 // 3. Arguments object. 3620 __ add(r9, r9, Operand(Heap::kArgumentsObjectSize)); 3621 3622 // Do the allocation of all three objects in one go. 
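// For illustration: shifting a smi-tagged count left by one yields its size
// in bytes (count * 2 * 2, since kPointerSize is 4 on ARM). So with, say,
// two mapped parameters and three arguments, r9 now holds
// (kParameterMapHeaderSize + 2 * kPointerSize)
//     + (FixedArray::kHeaderSize + 3 * kPointerSize)
//     + Heap::kArgumentsObjectSize,
// and with no mapped parameters the parameter map term drops out entirely.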
3623 __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT); 3624 3625 // r0 = address of new object(s) (tagged) 3626 // r2 = argument count (tagged) 3627 // Get the arguments boilerplate from the current native context into r4. 3628 const int kNormalOffset = 3629 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); 3630 const int kAliasedOffset = 3631 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); 3632 3633 __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 3634 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); 3635 __ cmp(r1, Operand::Zero()); 3636 __ ldr(r4, MemOperand(r4, kNormalOffset), eq); 3637 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne); 3638 3639 // r0 = address of new object (tagged) 3640 // r1 = mapped parameter count (tagged) 3641 // r2 = argument count (tagged) 3642 // r4 = address of boilerplate object (tagged) 3643 // Copy the JS object part. 3644 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { 3645 __ ldr(r3, FieldMemOperand(r4, i)); 3646 __ str(r3, FieldMemOperand(r0, i)); 3647 } 3648 3649 // Set up the callee in-object property. 3650 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); 3651 __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); 3652 const int kCalleeOffset = JSObject::kHeaderSize + 3653 Heap::kArgumentsCalleeIndex * kPointerSize; 3654 __ str(r3, FieldMemOperand(r0, kCalleeOffset)); 3655 3656 // Use the length (smi tagged) and set that as an in-object property too. 3657 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 3658 const int kLengthOffset = JSObject::kHeaderSize + 3659 Heap::kArgumentsLengthIndex * kPointerSize; 3660 __ str(r2, FieldMemOperand(r0, kLengthOffset)); 3661 3662 // Set up the elements pointer in the allocated arguments object. 3663 // If we allocated a parameter map, r4 will point there, otherwise 3664 // it will point to the backing store. 3665 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); 3666 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); 3667 3668 // r0 = address of new object (tagged) 3669 // r1 = mapped parameter count (tagged) 3670 // r2 = argument count (tagged) 3671 // r4 = address of parameter map or backing store (tagged) 3672 // Initialize parameter map. If there are no mapped arguments, we're done. 3673 Label skip_parameter_map; 3674 __ cmp(r1, Operand(Smi::FromInt(0))); 3675 // Move backing store address to r3, because it is 3676 // expected there when filling in the unmapped arguments. 3677 __ mov(r3, r4, LeaveCC, eq); 3678 __ b(eq, &skip_parameter_map); 3679 3680 __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex); 3681 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset)); 3682 __ add(r6, r1, Operand(Smi::FromInt(2))); 3683 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); 3684 __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize)); 3685 __ add(r6, r4, Operand(r1, LSL, 1)); 3686 __ add(r6, r6, Operand(kParameterMapHeaderSize)); 3687 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize)); 3688 3689 // Copy the parameter slots and the holes in the arguments. 3690 // We need to fill in mapped_parameter_count slots. They index the context, 3691 // where parameters are stored in reverse order, at 3692 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 3693 // The mapped parameter thus need to get indices 3694 // MIN_CONTEXT_SLOTS+parameter_count-1 .. 3695 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count 3696 // We loop from right to left. 
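// For illustration, assuming Context::MIN_CONTEXT_SLOTS is 4: with
// parameter_count == 3 and mapped_parameter_count == 2, the loop below
// writes the context indices 6 and 5 (as smis) into the parameter map and
// marks the matching backing store slots with the hole; the unmapped
// arguments are then copied into the backing store by the loop that follows.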
3697 Label parameters_loop, parameters_test;
3698 __ mov(r6, r1);
3699 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
3700 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3701 __ sub(r9, r9, Operand(r1));
3702 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
3703 __ add(r3, r4, Operand(r6, LSL, 1));
3704 __ add(r3, r3, Operand(kParameterMapHeaderSize));
3705 
3706 // r6 = loop variable (tagged)
3707 // r1 = mapping index (tagged)
3708 // r3 = address of backing store (tagged)
3709 // r4 = address of parameter map (tagged)
3710 // r5 = temporary scratch (among other things, for address calculation)
3711 // r7 = the hole value
3712 __ jmp(&parameters_test);
3713 
3714 __ bind(&parameters_loop);
3715 __ sub(r6, r6, Operand(Smi::FromInt(1)));
3716 __ mov(r5, Operand(r6, LSL, 1));
3717 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
3718 __ str(r9, MemOperand(r4, r5));
3719 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
3720 __ str(r7, MemOperand(r3, r5));
3721 __ add(r9, r9, Operand(Smi::FromInt(1)));
3722 __ bind(&parameters_test);
3723 __ cmp(r6, Operand(Smi::FromInt(0)));
3724 __ b(ne, &parameters_loop);
3725 
3726 __ bind(&skip_parameter_map);
3727 // r2 = argument count (tagged)
3728 // r3 = address of backing store (tagged)
3729 // r5 = scratch
3730 // Copy arguments header and remaining slots (if there are any).
3731 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
3732 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
3733 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
3734 
3735 Label arguments_loop, arguments_test;
3736 __ mov(r9, r1);
3737 __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
3738 __ sub(r4, r4, Operand(r9, LSL, 1));
3739 __ jmp(&arguments_test);
3740 
3741 __ bind(&arguments_loop);
3742 __ sub(r4, r4, Operand(kPointerSize));
3743 __ ldr(r6, MemOperand(r4, 0));
3744 __ add(r5, r3, Operand(r9, LSL, 1));
3745 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
3746 __ add(r9, r9, Operand(Smi::FromInt(1)));
3747 
3748 __ bind(&arguments_test);
3749 __ cmp(r9, Operand(r2));
3750 __ b(lt, &arguments_loop);
3751 
3752 // Return and remove the on-stack parameters.
3753 __ add(sp, sp, Operand(3 * kPointerSize));
3754 __ Ret();
3755 
3756 // Do the runtime call to allocate the arguments object.
3757 // r2 = argument count (tagged)
3758 __ bind(&runtime);
3759 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
3760 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3761 }
3762 
3763 
3764 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3765 // sp[0] : number of parameters
3766 // sp[4] : receiver displacement
3767 // sp[8] : function
3768 // Check if the calling frame is an arguments adaptor frame.
3769 Label adaptor_frame, try_allocate, runtime;
3770 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3771 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
3772 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3773 __ b(eq, &adaptor_frame);
3774 
3775 // Get the length from the frame.
3776 __ ldr(r1, MemOperand(sp, 0));
3777 __ b(&try_allocate);
3778 
3779 // Patch the arguments.length and the parameters pointer.
3780 __ bind(&adaptor_frame); 3781 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3782 __ str(r1, MemOperand(sp, 0)); 3783 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1)); 3784 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); 3785 __ str(r3, MemOperand(sp, 1 * kPointerSize)); 3786 3787 // Try the new space allocation. Start out with computing the size 3788 // of the arguments object and the elements array in words. 3789 Label add_arguments_object; 3790 __ bind(&try_allocate); 3791 __ SmiUntag(r1, SetCC); 3792 __ b(eq, &add_arguments_object); 3793 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); 3794 __ bind(&add_arguments_object); 3795 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); 3796 3797 // Do the allocation of both objects in one go. 3798 __ Allocate(r1, r0, r2, r3, &runtime, 3799 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); 3800 3801 // Get the arguments boilerplate from the current native context. 3802 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 3803 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); 3804 __ ldr(r4, MemOperand(r4, Context::SlotOffset( 3805 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); 3806 3807 // Copy the JS object part. 3808 __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize); 3809 3810 // Get the length (smi tagged) and set that as an in-object property too. 3811 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 3812 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); 3813 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + 3814 Heap::kArgumentsLengthIndex * kPointerSize)); 3815 3816 // If there are no actual arguments, we're done. 3817 Label done; 3818 __ cmp(r1, Operand::Zero()); 3819 __ b(eq, &done); 3820 3821 // Get the parameters pointer from the stack. 3822 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); 3823 3824 // Set up the elements pointer in the allocated arguments object and 3825 // initialize the header in the elements fixed array. 3826 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); 3827 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); 3828 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); 3829 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); 3830 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); 3831 __ SmiUntag(r1); 3832 3833 // Copy the fixed array slots. 3834 Label loop; 3835 // Set up r4 to point to the first array slot. 3836 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 3837 __ bind(&loop); 3838 // Pre-decrement r2 with kPointerSize on each iteration. 3839 // Pre-decrement in order to skip receiver. 3840 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); 3841 // Post-increment r4 with kPointerSize on each iteration. 3842 __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); 3843 __ sub(r1, r1, Operand(1)); 3844 __ cmp(r1, Operand::Zero()); 3845 __ b(ne, &loop); 3846 3847 // Return and remove the on-stack parameters. 3848 __ bind(&done); 3849 __ add(sp, sp, Operand(3 * kPointerSize)); 3850 __ Ret(); 3851 3852 // Do the runtime call to allocate the arguments object. 3853 __ bind(&runtime); 3854 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); 3855 } 3856 3857 3858 void RegExpExecStub::Generate(MacroAssembler* masm) { 3859 // Just jump directly to runtime if native RegExp is not selected at compile 3860 // time or if regexp entry in generated code is turned off runtime switch or 3861 // at compilation. 
3862 #ifdef V8_INTERPRETED_REGEXP 3863 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); 3864 #else // V8_INTERPRETED_REGEXP 3865 3866 // Stack frame on entry. 3867 // sp[0]: last_match_info (expected JSArray) 3868 // sp[4]: previous index 3869 // sp[8]: subject string 3870 // sp[12]: JSRegExp object 3871 3872 const int kLastMatchInfoOffset = 0 * kPointerSize; 3873 const int kPreviousIndexOffset = 1 * kPointerSize; 3874 const int kSubjectOffset = 2 * kPointerSize; 3875 const int kJSRegExpOffset = 3 * kPointerSize; 3876 3877 Label runtime; 3878 // Allocation of registers for this function. These are in callee save 3879 // registers and will be preserved by the call to the native RegExp code, as 3880 // this code is called using the normal C calling convention. When calling 3881 // directly from generated code the native RegExp code will not do a GC and 3882 // therefore the content of these registers are safe to use after the call. 3883 Register subject = r4; 3884 Register regexp_data = r5; 3885 Register last_match_info_elements = r6; 3886 3887 // Ensure that a RegExp stack is allocated. 3888 Isolate* isolate = masm->isolate(); 3889 ExternalReference address_of_regexp_stack_memory_address = 3890 ExternalReference::address_of_regexp_stack_memory_address(isolate); 3891 ExternalReference address_of_regexp_stack_memory_size = 3892 ExternalReference::address_of_regexp_stack_memory_size(isolate); 3893 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); 3894 __ ldr(r0, MemOperand(r0, 0)); 3895 __ cmp(r0, Operand::Zero()); 3896 __ b(eq, &runtime); 3897 3898 // Check that the first argument is a JSRegExp object. 3899 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); 3900 __ JumpIfSmi(r0, &runtime); 3901 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); 3902 __ b(ne, &runtime); 3903 3904 // Check that the RegExp has been compiled (data contains a fixed array). 3905 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); 3906 if (FLAG_debug_code) { 3907 __ SmiTst(regexp_data); 3908 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); 3909 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); 3910 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); 3911 } 3912 3913 // regexp_data: RegExp data (FixedArray) 3914 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. 3915 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); 3916 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); 3917 __ b(ne, &runtime); 3918 3919 // regexp_data: RegExp data (FixedArray) 3920 // Check that the number of captures fit in the static offsets vector buffer. 3921 __ ldr(r2, 3922 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); 3923 // Check (number_of_captures + 1) * 2 <= offsets vector size 3924 // Or number_of_captures * 2 <= offsets vector size - 2 3925 // Multiplying by 2 comes for free since r2 is smi-tagged. 3926 STATIC_ASSERT(kSmiTag == 0); 3927 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 3928 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); 3929 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); 3930 __ b(hi, &runtime); 3931 3932 // Reset offset for possibly sliced string. 3933 __ mov(r9, Operand::Zero()); 3934 __ ldr(subject, MemOperand(sp, kSubjectOffset)); 3935 __ JumpIfSmi(subject, &runtime); 3936 __ mov(r3, subject); // Make a copy of the original subject string. 
3937 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
3938 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
3939 // subject: subject string
3940 // r3: subject string
3941 // r0: subject string instance type
3942 // regexp_data: RegExp data (FixedArray)
3943 // Handle subject string according to its encoding and representation:
3944 // (1) Sequential string? If yes, go to (5).
3945 // (2) Anything but sequential or cons? If yes, go to (6).
3946 // (3) Cons string. If the string is flat, replace subject with first string.
3947 // Otherwise bailout.
3948 // (4) Is subject external? If yes, go to (7).
3949 // (5) Sequential string. Load regexp code according to encoding.
3950 // (E) Carry on.
3951 /// [...]
3952 
3953 // Deferred code at the end of the stub:
3954 // (6) Not a long external string? If yes, go to (8).
3955 // (7) External string. Make it, offset-wise, look like a sequential string.
3956 // Go to (5).
3957 // (8) Short external string or not a string? If yes, bail out to runtime.
3958 // (9) Sliced string. Replace subject with parent. Go to (4).
3959 
3960 Label seq_string /* 5 */, external_string /* 7 */,
3961 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
3962 not_long_external /* 8 */;
3963 
3964 // (1) Sequential string? If yes, go to (5).
3965 __ and_(r1,
3966 r0,
3967 Operand(kIsNotStringMask |
3968 kStringRepresentationMask |
3969 kShortExternalStringMask),
3970 SetCC);
3971 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
3972 __ b(eq, &seq_string); // Go to (5).
3973 
3974 // (2) Anything but sequential or cons? If yes, go to (6).
3975 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
3976 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
3977 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
3978 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
3979 __ cmp(r1, Operand(kExternalStringTag));
3980 __ b(ge, &not_seq_nor_cons); // Go to (6).
3981 
3982 // (3) Cons string. Check that it's flat.
3983 // Replace subject with first string and reload instance type.
3984 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
3985 __ CompareRoot(r0, Heap::kempty_stringRootIndex);
3986 __ b(ne, &runtime);
3987 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
3988 
3989 // (4) Is subject external? If yes, go to (7).
3990 __ bind(&check_underlying);
3991 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
3992 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
3993 STATIC_ASSERT(kSeqStringTag == 0);
3994 __ tst(r0, Operand(kStringRepresentationMask));
3995 // The underlying external string is never a short external string.
3996 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
3997 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
3998 __ b(ne, &external_string); // Go to (7).
3999 
4000 // (5) Sequential string. Load regexp code according to encoding.
4001 __ bind(&seq_string);
4002 // subject: sequential subject string (or look-alike, external string)
4003 // r3: original subject string
4004 // Load previous index and check range before r3 is overwritten. We have to
4005 // use r3 instead of subject here because subject might have been only made
4006 // to look like a sequential string when it actually is an external string.
4007 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); 4008 __ JumpIfNotSmi(r1, &runtime); 4009 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); 4010 __ cmp(r3, Operand(r1)); 4011 __ b(ls, &runtime); 4012 __ SmiUntag(r1); 4013 4014 STATIC_ASSERT(4 == kOneByteStringTag); 4015 STATIC_ASSERT(kTwoByteStringTag == 0); 4016 __ and_(r0, r0, Operand(kStringEncodingMask)); 4017 __ mov(r3, Operand(r0, ASR, 2), SetCC); 4018 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); 4019 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); 4020 4021 // (E) Carry on. String handling is done. 4022 // r7: irregexp code 4023 // Check that the irregexp code has been generated for the actual string 4024 // encoding. If it has, the field contains a code object otherwise it contains 4025 // a smi (code flushing support). 4026 __ JumpIfSmi(r7, &runtime); 4027 4028 // r1: previous index 4029 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); 4030 // r7: code 4031 // subject: Subject string 4032 // regexp_data: RegExp data (FixedArray) 4033 // All checks done. Now push arguments for native regexp code. 4034 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); 4035 4036 // Isolates: note we add an additional parameter here (isolate pointer). 4037 const int kRegExpExecuteArguments = 9; 4038 const int kParameterRegisters = 4; 4039 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); 4040 4041 // Stack pointer now points to cell where return address is to be written. 4042 // Arguments are before that on the stack or in registers. 4043 4044 // Argument 9 (sp[20]): Pass current isolate address. 4045 __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); 4046 __ str(r0, MemOperand(sp, 5 * kPointerSize)); 4047 4048 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. 4049 __ mov(r0, Operand(1)); 4050 __ str(r0, MemOperand(sp, 4 * kPointerSize)); 4051 4052 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. 4053 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); 4054 __ ldr(r0, MemOperand(r0, 0)); 4055 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); 4056 __ ldr(r2, MemOperand(r2, 0)); 4057 __ add(r0, r0, Operand(r2)); 4058 __ str(r0, MemOperand(sp, 3 * kPointerSize)); 4059 4060 // Argument 6: Set the number of capture registers to zero to force global 4061 // regexps to behave as non-global. This does not affect non-global regexps. 4062 __ mov(r0, Operand::Zero()); 4063 __ str(r0, MemOperand(sp, 2 * kPointerSize)); 4064 4065 // Argument 5 (sp[4]): static offsets vector buffer. 4066 __ mov(r0, 4067 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); 4068 __ str(r0, MemOperand(sp, 1 * kPointerSize)); 4069 4070 // For arguments 4 and 3 get string length, calculate start of string data and 4071 // calculate the shift of the index (0 for ASCII and 1 for two byte). 4072 __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); 4073 __ eor(r3, r3, Operand(1)); 4074 // Load the length from the original subject string from the previous stack 4075 // frame. Therefore we have to use fp, which points exactly to two pointer 4076 // sizes below the previous sp. (Because creating a new stack frame pushes 4077 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) 
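// For illustration: kSubjectOffset is 2 * kPointerSize, and fp sits two
// pointer sizes below the previous sp, so the caller's subject slot is
// reachable at fp + 4 * kPointerSize, which is exactly what the load below
// computes.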
4078 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); 4079 // If slice offset is not 0, load the length from the original sliced string. 4080 // Argument 4, r3: End of string data 4081 // Argument 3, r2: Start of string data 4082 // Prepare start and end index of the input. 4083 __ add(r9, r8, Operand(r9, LSL, r3)); 4084 __ add(r2, r9, Operand(r1, LSL, r3)); 4085 4086 __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset)); 4087 __ SmiUntag(r8); 4088 __ add(r3, r9, Operand(r8, LSL, r3)); 4089 4090 // Argument 2 (r1): Previous index. 4091 // Already there 4092 4093 // Argument 1 (r0): Subject string. 4094 __ mov(r0, subject); 4095 4096 // Locate the code entry and call it. 4097 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); 4098 DirectCEntryStub stub; 4099 stub.GenerateCall(masm, r7); 4100 4101 __ LeaveExitFrame(false, no_reg); 4102 4103 // r0: result 4104 // subject: subject string (callee saved) 4105 // regexp_data: RegExp data (callee saved) 4106 // last_match_info_elements: Last match info elements (callee saved) 4107 // Check the result. 4108 Label success; 4109 __ cmp(r0, Operand(1)); 4110 // We expect exactly one result since we force the called regexp to behave 4111 // as non-global. 4112 __ b(eq, &success); 4113 Label failure; 4114 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); 4115 __ b(eq, &failure); 4116 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); 4117 // If not exception it can only be retry. Handle that in the runtime system. 4118 __ b(ne, &runtime); 4119 // Result must now be exception. If there is no pending exception already a 4120 // stack overflow (on the backtrack stack) was detected in RegExp code but 4121 // haven't created the exception yet. Handle that in the runtime system. 4122 // TODO(592): Rerunning the RegExp to get the stack overflow exception. 4123 __ mov(r1, Operand(isolate->factory()->the_hole_value())); 4124 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 4125 isolate))); 4126 __ ldr(r0, MemOperand(r2, 0)); 4127 __ cmp(r0, r1); 4128 __ b(eq, &runtime); 4129 4130 __ str(r1, MemOperand(r2, 0)); // Clear pending exception. 4131 4132 // Check if the exception is a termination. If so, throw as uncatchable. 4133 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex); 4134 4135 Label termination_exception; 4136 __ b(eq, &termination_exception); 4137 4138 __ Throw(r0); 4139 4140 __ bind(&termination_exception); 4141 __ ThrowUncatchable(r0); 4142 4143 __ bind(&failure); 4144 // For failure and exception return null. 4145 __ mov(r0, Operand(masm->isolate()->factory()->null_value())); 4146 __ add(sp, sp, Operand(4 * kPointerSize)); 4147 __ Ret(); 4148 4149 // Process the result from the native regexp code. 4150 __ bind(&success); 4151 __ ldr(r1, 4152 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); 4153 // Calculate number of capture registers (number_of_captures + 1) * 2. 4154 // Multiplying by 2 comes for free since r1 is smi-tagged. 4155 STATIC_ASSERT(kSmiTag == 0); 4156 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 4157 __ add(r1, r1, Operand(2)); // r1 was a smi. 4158 4159 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); 4160 __ JumpIfSmi(r0, &runtime); 4161 __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE); 4162 __ b(ne, &runtime); 4163 // Check that the JSArray is in fast case. 
4164 __ ldr(last_match_info_elements,
4165 FieldMemOperand(r0, JSArray::kElementsOffset));
4166 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4167 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
4168 __ b(ne, &runtime);
4169 // Check that the last match info has space for the capture registers and the
4170 // additional information.
4171 __ ldr(r0,
4172 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4173 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
4174 __ cmp(r2, Operand::SmiUntag(r0));
4175 __ b(gt, &runtime);
4176 
4177 // r1: number of capture registers
4178 // r4: subject string
4179 // Store the capture count.
4180 __ SmiTag(r2, r1);
4181 __ str(r2, FieldMemOperand(last_match_info_elements,
4182 RegExpImpl::kLastCaptureCountOffset));
4183 // Store last subject and last input.
4184 __ str(subject,
4185 FieldMemOperand(last_match_info_elements,
4186 RegExpImpl::kLastSubjectOffset));
4187 __ mov(r2, subject);
4188 __ RecordWriteField(last_match_info_elements,
4189 RegExpImpl::kLastSubjectOffset,
4190 subject,
4191 r7,
4192 kLRHasNotBeenSaved,
4193 kDontSaveFPRegs);
4194 __ mov(subject, r2);
4195 __ str(subject,
4196 FieldMemOperand(last_match_info_elements,
4197 RegExpImpl::kLastInputOffset));
4198 __ RecordWriteField(last_match_info_elements,
4199 RegExpImpl::kLastInputOffset,
4200 subject,
4201 r7,
4202 kLRHasNotBeenSaved,
4203 kDontSaveFPRegs);
4204 
4205 // Get the static offsets vector filled by the native regexp code.
4206 ExternalReference address_of_static_offsets_vector =
4207 ExternalReference::address_of_static_offsets_vector(isolate);
4208 __ mov(r2, Operand(address_of_static_offsets_vector));
4209 
4210 // r1: number of capture registers
4211 // r2: offsets vector
4212 Label next_capture, done;
4213 // Capture register counter starts from number of capture registers and
4214 // counts down until wrapping after zero.
4215 __ add(r0,
4216 last_match_info_elements,
4217 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
4218 __ bind(&next_capture);
4219 __ sub(r1, r1, Operand(1), SetCC);
4220 __ b(mi, &done);
4221 // Read the value from the static offsets vector buffer.
4222 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
4223 // Store the smi value in the last match info.
4224 __ SmiTag(r3);
4225 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
4226 __ jmp(&next_capture);
4227 __ bind(&done);
4228 
4229 // Return last match info.
4230 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
4231 __ add(sp, sp, Operand(4 * kPointerSize));
4232 __ Ret();
4233 
4234 // Do the runtime call to execute the regexp.
4235 __ bind(&runtime);
4236 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4237 
4238 // Deferred code for string handling.
4239 // (6) Not a long external string? If yes, go to (8).
4240 __ bind(&not_seq_nor_cons);
4241 // Compare flags are still set.
4242 __ b(gt, &not_long_external); // Go to (8).
4243 
4244 // (7) External string. Make it, offset-wise, look like a sequential string.
4245 __ bind(&external_string);
4246 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4247 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4248 if (FLAG_debug_code) {
4249 // Assert that we do not have a cons or slice (indirect strings) here.
4250 // Sequential strings have already been ruled out.
4251 __ tst(r0, Operand(kIsIndirectStringMask));
4252 __ Assert(eq, kExternalStringExpectedButNotFound);
4253 }
4254 __ ldr(subject,
4255 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
4256 // Move the pointer so that offset-wise, it looks like a sequential string.
4257 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
4258 __ sub(subject,
4259 subject,
4260 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4261 __ jmp(&seq_string); // Go to (5).
4262 
4263 // (8) Short external string or not a string? If yes, bail out to runtime.
4264 __ bind(&not_long_external);
4265 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
4266 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
4267 __ b(ne, &runtime);
4268 
4269 // (9) Sliced string. Replace subject with parent. Go to (4).
4270 // Load offset into r9 and replace subject string with parent.
4271 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4272 __ SmiUntag(r9);
4273 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4274 __ jmp(&check_underlying); // Go to (4).
4275 #endif // V8_INTERPRETED_REGEXP
4276 }
4277 
4278 
4279 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4280 const int kMaxInlineLength = 100;
4281 Label slowcase;
4282 Label done;
4283 Factory* factory = masm->isolate()->factory();
4284 
4285 __ ldr(r1, MemOperand(sp, kPointerSize * 2));
4286 STATIC_ASSERT(kSmiTag == 0);
4287 STATIC_ASSERT(kSmiTagSize == 1);
4288 __ JumpIfNotSmi(r1, &slowcase);
4289 __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
4290 __ b(hi, &slowcase);
4291 // Smi-tagging is equivalent to multiplying by 2.
4292 // Allocate RegExpResult followed by FixedArray with size in r2.
4293 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4294 // Elements: [Map][Length][..elements..]
4295 // Size of JSArray with two in-object properties and the header of a
4296 // FixedArray.
4297 int objects_size =
4298 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
4299 __ SmiUntag(r5, r1);
4300 __ add(r2, r5, Operand(objects_size));
4301 __ Allocate(
4302 r2, // In: Size, in words.
4303 r0, // Out: Start of allocation (tagged).
4304 r3, // Scratch register.
4305 r4, // Scratch register.
4306 &slowcase,
4307 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4308 // r0: Start of allocated area, object-tagged.
4309 // r1: Number of elements in array, as smi.
4310 // r5: Number of elements, untagged.
4311 
4312 // Set JSArray map to global.regexp_result_map().
4313 // Set empty properties FixedArray.
4314 // Set elements to point to FixedArray allocated right after the JSArray.
4315 // Interleave operations for better latency.
4316 __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
4317 __ add(r3, r0, Operand(JSRegExpResult::kSize));
4318 __ mov(r4, Operand(factory->empty_fixed_array()));
4319 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
4320 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
4321 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
4322 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
4323 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4324 
4325 // Set input, index and length fields from arguments.
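// As the loads below show, sp[0] holds the input string, sp[4] the match
// index and sp[8] the (smi) number of elements, which also becomes the
// length of the JSArray.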
4326 __ ldr(r1, MemOperand(sp, kPointerSize * 0));
4327 __ ldr(r2, MemOperand(sp, kPointerSize * 1));
4328 __ ldr(r6, MemOperand(sp, kPointerSize * 2));
4329 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
4330 __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
4331 __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
4332 
4333 // Fill out the elements FixedArray.
4334 // r0: JSArray, tagged.
4335 // r3: FixedArray, tagged.
4336 // r5: Number of elements in array, untagged.
4337 
4338 // Set map.
4339 __ mov(r2, Operand(factory->fixed_array_map()));
4340 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
4341 // Set FixedArray length.
4342 __ SmiTag(r6, r5);
4343 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
4344 // Fill contents of fixed-array with undefined.
4345 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4346 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4347 // Fill fixed array elements with undefined.
4348 // r0: JSArray, tagged.
4349 // r2: undefined.
4350 // r3: Start of elements in FixedArray.
4351 // r5: Number of elements to fill.
4352 Label loop;
4353 __ cmp(r5, Operand::Zero());
4354 __ bind(&loop);
4355 __ b(le, &done); // Jump if r5 is negative or zero.
4356 __ sub(r5, r5, Operand(1), SetCC);
4357 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
4358 __ jmp(&loop);
4359 
4360 __ bind(&done);
4361 __ add(sp, sp, Operand(3 * kPointerSize));
4362 __ Ret();
4363 
4364 __ bind(&slowcase);
4365 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4366 }
4367 
4368 
4369 static void GenerateRecordCallTarget(MacroAssembler* masm) {
4370 // Cache the called function in a global property cell. Cache states
4371 // are uninitialized, monomorphic (indicated by a JSFunction), and
4372 // megamorphic.
4373 // r1 : the function to call
4374 // r2 : cache cell for call target
4375 Label initialize, done, miss, megamorphic, not_array_function;
4376 
4377 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
4378 masm->isolate()->heap()->undefined_value());
4379 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
4380 masm->isolate()->heap()->the_hole_value());
4381 
4382 // Load the cache state into r3.
4383 __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
4384 
4385 // A monomorphic cache hit or an already megamorphic state: invoke the
4386 // function without changing the state.
4387 __ cmp(r3, r1);
4388 __ b(eq, &done);
4389 
4390 // If we came here, we need to see if we are the array function.
4391 // If we didn't have a matching function, and we didn't find the megamorphic
4392 // sentinel, then we have in the cell either some other function or an
4393 // AllocationSite. Do a map check on the object in r3.
4394 Handle<Map> allocation_site_map(
4395 masm->isolate()->heap()->allocation_site_map(),
4396 masm->isolate());
4397 __ ldr(r5, FieldMemOperand(r3, 0));
4398 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4399 __ b(ne, &miss);
4400 
4401 // Make sure the function is the Array() function
4402 __ LoadArrayFunction(r3);
4403 __ cmp(r1, r3);
4404 __ b(ne, &megamorphic);
4405 __ jmp(&done);
4406 
4407 __ bind(&miss);
4408 
4409 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
4410 // megamorphic.
4411 __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
4412 __ b(eq, &initialize);
4413 // MegamorphicSentinel is an immortal immovable object (undefined) so no
4414 // write-barrier is needed.
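// To summarize the state machine: the cell starts out holding the hole
// (uninitialized), is patched to the called JSFunction or an AllocationSite
// (monomorphic), and ends up holding undefined once it goes megamorphic.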
4415 __ bind(&megamorphic);
4416 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4417 __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
4418 __ jmp(&done);
4419 
4420 // An uninitialized cache is patched with the function or sentinel to
4421 // indicate the ElementsKind if function is the Array constructor.
4422 __ bind(&initialize);
4423 // Make sure the function is the Array() function
4424 __ LoadArrayFunction(r3);
4425 __ cmp(r1, r3);
4426 __ b(ne, &not_array_function);
4427 
4428 // The target function is the Array constructor.
4429 // Create an AllocationSite if we don't already have it and store it in the cell.
4430 {
4431 FrameScope scope(masm, StackFrame::INTERNAL);
4432 
4433 __ SmiTag(r0);
4434 __ push(r0);
4435 __ push(r1);
4436 __ push(r2);
4437 
4438 CreateAllocationSiteStub create_stub;
4439 __ CallStub(&create_stub);
4440 
4441 __ pop(r2);
4442 __ pop(r1);
4443 __ pop(r0);
4444 __ SmiUntag(r0);
4445 }
4446 __ b(&done);
4447 
4448 __ bind(&not_array_function);
4449 __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
4450 // No need for a write barrier here - cells are rescanned.
4451 
4452 __ bind(&done);
4453 }
4454 
4455 
4456 void CallFunctionStub::Generate(MacroAssembler* masm) {
4457 // r1 : the function to call
4458 // r2 : cache cell for call target
4459 Label slow, non_function;
4460 
4461 // The receiver might implicitly be the global object. This is
4462 // indicated by passing the hole as the receiver to the call
4463 // function stub.
4464 if (ReceiverMightBeImplicit()) {
4465 Label call;
4466 // Get the receiver from the stack.
4467 // function, receiver [, arguments]
4468 __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
4469 // Call as function is indicated with the hole.
4470 __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
4471 __ b(ne, &call);
4472 // Patch the receiver on the stack with the global receiver object.
4473 __ ldr(r3,
4474 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4475 __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
4476 __ str(r3, MemOperand(sp, argc_ * kPointerSize));
4477 __ bind(&call);
4478 }
4479 
4480 // Check that the function is really a JavaScript function.
4481 // r1: pushed function (to be verified)
4482 __ JumpIfSmi(r1, &non_function);
4483 // Get the map of the function object.
4484 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
4485 __ b(ne, &slow);
4486 
4487 if (RecordCallTarget()) {
4488 GenerateRecordCallTarget(masm);
4489 }
4490 
4491 // Fast-case: Invoke the function now.
4492 // r1: pushed function
4493 ParameterCount actual(argc_);
4494 
4495 if (ReceiverMightBeImplicit()) {
4496 Label call_as_function;
4497 __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
4498 __ b(eq, &call_as_function);
4499 __ InvokeFunction(r1,
4500 actual,
4501 JUMP_FUNCTION,
4502 NullCallWrapper(),
4503 CALL_AS_METHOD);
4504 __ bind(&call_as_function);
4505 }
4506 __ InvokeFunction(r1,
4507 actual,
4508 JUMP_FUNCTION,
4509 NullCallWrapper(),
4510 CALL_AS_FUNCTION);
4511 
4512 // Slow-case: Non-function called.
4513 __ bind(&slow);
4514 if (RecordCallTarget()) {
4515 // If there is a call target cache, mark it megamorphic in the
4516 // non-function case. MegamorphicSentinel is an immortal immovable
4517 // object (undefined) so no write barrier is needed.
4518 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), 4519 masm->isolate()->heap()->undefined_value()); 4520 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4521 __ str(ip, FieldMemOperand(r2, Cell::kValueOffset)); 4522 } 4523 // Check for function proxy. 4524 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); 4525 __ b(ne, &non_function); 4526 __ push(r1); // put proxy as additional argument 4527 __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32)); 4528 __ mov(r2, Operand::Zero()); 4529 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); 4530 __ SetCallKind(r5, CALL_AS_METHOD); 4531 { 4532 Handle<Code> adaptor = 4533 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); 4534 __ Jump(adaptor, RelocInfo::CODE_TARGET); 4535 } 4536 4537 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 4538 // of the original receiver from the call site). 4539 __ bind(&non_function); 4540 __ str(r1, MemOperand(sp, argc_ * kPointerSize)); 4541 __ mov(r0, Operand(argc_)); // Set up the number of arguments. 4542 __ mov(r2, Operand::Zero()); 4543 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); 4544 __ SetCallKind(r5, CALL_AS_METHOD); 4545 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 4546 RelocInfo::CODE_TARGET); 4547 } 4548 4549 4550 void CallConstructStub::Generate(MacroAssembler* masm) { 4551 // r0 : number of arguments 4552 // r1 : the function to call 4553 // r2 : cache cell for call target 4554 Label slow, non_function_call; 4555 4556 // Check that the function is not a smi. 4557 __ JumpIfSmi(r1, &non_function_call); 4558 // Check that the function is a JSFunction. 4559 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); 4560 __ b(ne, &slow); 4561 4562 if (RecordCallTarget()) { 4563 GenerateRecordCallTarget(masm); 4564 } 4565 4566 // Jump to the function-specific construct stub. 4567 Register jmp_reg = r3; 4568 __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); 4569 __ ldr(jmp_reg, FieldMemOperand(jmp_reg, 4570 SharedFunctionInfo::kConstructStubOffset)); 4571 __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); 4572 4573 // r0: number of arguments 4574 // r1: called object 4575 // r3: object type 4576 Label do_call; 4577 __ bind(&slow); 4578 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); 4579 __ b(ne, &non_function_call); 4580 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); 4581 __ jmp(&do_call); 4582 4583 __ bind(&non_function_call); 4584 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); 4585 __ bind(&do_call); 4586 // Set expected number of arguments to zero (not changing r0). 4587 __ mov(r2, Operand::Zero()); 4588 __ SetCallKind(r5, CALL_AS_METHOD); 4589 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 4590 RelocInfo::CODE_TARGET); 4591 } 4592 4593 4594 // StringCharCodeAtGenerator 4595 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { 4596 Label flat_string; 4597 Label ascii_string; 4598 Label got_char_code; 4599 Label sliced_string; 4600 4601 // If the receiver is a smi trigger the non-string case. 4602 __ JumpIfSmi(object_, receiver_not_string_); 4603 4604 // Fetch the instance type of the receiver into result register. 4605 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 4606 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 4607 // If the receiver is not a string trigger the non-string case. 
4608 __ tst(result_, Operand(kIsNotStringMask)); 4609 __ b(ne, receiver_not_string_); 4610 4611 // If the index is non-smi trigger the non-smi case. 4612 __ JumpIfNotSmi(index_, &index_not_smi_); 4613 __ bind(&got_smi_index_); 4614 4615 // Check for index out of range. 4616 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); 4617 __ cmp(ip, Operand(index_)); 4618 __ b(ls, index_out_of_range_); 4619 4620 __ SmiUntag(index_); 4621 4622 StringCharLoadGenerator::Generate(masm, 4623 object_, 4624 index_, 4625 result_, 4626 &call_runtime_); 4627 4628 __ SmiTag(result_); 4629 __ bind(&exit_); 4630 } 4631 4632 4633 void StringCharCodeAtGenerator::GenerateSlow( 4634 MacroAssembler* masm, 4635 const RuntimeCallHelper& call_helper) { 4636 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); 4637 4638 // Index is not a smi. 4639 __ bind(&index_not_smi_); 4640 // If index is a heap number, try converting it to an integer. 4641 __ CheckMap(index_, 4642 result_, 4643 Heap::kHeapNumberMapRootIndex, 4644 index_not_number_, 4645 DONT_DO_SMI_CHECK); 4646 call_helper.BeforeCall(masm); 4647 __ push(object_); 4648 __ push(index_); // Consumed by runtime conversion function. 4649 if (index_flags_ == STRING_INDEX_IS_NUMBER) { 4650 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); 4651 } else { 4652 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); 4653 // NumberToSmi discards numbers that are not exact integers. 4654 __ CallRuntime(Runtime::kNumberToSmi, 1); 4655 } 4656 // Save the conversion result before the pop instructions below 4657 // have a chance to overwrite it. 4658 __ Move(index_, r0); 4659 __ pop(object_); 4660 // Reload the instance type. 4661 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 4662 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 4663 call_helper.AfterCall(masm); 4664 // If index is still not a smi, it must be out of range. 4665 __ JumpIfNotSmi(index_, index_out_of_range_); 4666 // Otherwise, return to the fast path. 4667 __ jmp(&got_smi_index_); 4668 4669 // Call runtime. We get here when the receiver is a string and the 4670 // index is a number, but the code of getting the actual character 4671 // is too complex (e.g., when the string needs to be flattened). 4672 __ bind(&call_runtime_); 4673 call_helper.BeforeCall(masm); 4674 __ SmiTag(index_); 4675 __ Push(object_, index_); 4676 __ CallRuntime(Runtime::kStringCharCodeAt, 2); 4677 __ Move(result_, r0); 4678 call_helper.AfterCall(masm); 4679 __ jmp(&exit_); 4680 4681 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); 4682 } 4683 4684 4685 // ------------------------------------------------------------------------- 4686 // StringCharFromCodeGenerator 4687 4688 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { 4689 // Fast case of Heap::LookupSingleCharacterStringFromCode. 4690 STATIC_ASSERT(kSmiTag == 0); 4691 STATIC_ASSERT(kSmiShiftSize == 0); 4692 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); 4693 __ tst(code_, 4694 Operand(kSmiTagMask | 4695 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); 4696 __ b(ne, &slow_case_); 4697 4698 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); 4699 // At this point code register contains smi tagged ASCII char code. 
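// The cache is a FixedArray indexed by character code:
// PointerOffsetFromSmiKey turns the smi-tagged code into a byte offset of
// one pointer per element, and the load below fetches the cached
// one-character string (or undefined if it has not been materialized yet).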
4700 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); 4701 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); 4702 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); 4703 __ b(eq, &slow_case_); 4704 __ bind(&exit_); 4705 } 4706 4707 4708 void StringCharFromCodeGenerator::GenerateSlow( 4709 MacroAssembler* masm, 4710 const RuntimeCallHelper& call_helper) { 4711 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); 4712 4713 __ bind(&slow_case_); 4714 call_helper.BeforeCall(masm); 4715 __ push(code_); 4716 __ CallRuntime(Runtime::kCharFromCode, 1); 4717 __ Move(result_, r0); 4718 call_helper.AfterCall(masm); 4719 __ jmp(&exit_); 4720 4721 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); 4722 } 4723 4724 4725 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, 4726 Register dest, 4727 Register src, 4728 Register count, 4729 Register scratch, 4730 bool ascii) { 4731 Label loop; 4732 Label done; 4733 // This loop just copies one character at a time, as it is only used for very 4734 // short strings. 4735 if (!ascii) { 4736 __ add(count, count, Operand(count), SetCC); 4737 } else { 4738 __ cmp(count, Operand::Zero()); 4739 } 4740 __ b(eq, &done); 4741 4742 __ bind(&loop); 4743 __ ldrb(scratch, MemOperand(src, 1, PostIndex)); 4744 // Perform sub between load and dependent store to get the load time to 4745 // complete. 4746 __ sub(count, count, Operand(1), SetCC); 4747 __ strb(scratch, MemOperand(dest, 1, PostIndex)); 4748 // last iteration. 4749 __ b(gt, &loop); 4750 4751 __ bind(&done); 4752 } 4753 4754 4755 enum CopyCharactersFlags { 4756 COPY_ASCII = 1, 4757 DEST_ALWAYS_ALIGNED = 2 4758 }; 4759 4760 4761 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, 4762 Register dest, 4763 Register src, 4764 Register count, 4765 Register scratch1, 4766 Register scratch2, 4767 Register scratch3, 4768 Register scratch4, 4769 Register scratch5, 4770 int flags) { 4771 bool ascii = (flags & COPY_ASCII) != 0; 4772 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; 4773 4774 if (dest_always_aligned && FLAG_debug_code) { 4775 // Check that destination is actually word aligned if the flag says 4776 // that it is. 4777 __ tst(dest, Operand(kPointerAlignmentMask)); 4778 __ Check(eq, kDestinationOfCopyNotAligned); 4779 } 4780 4781 const int kReadAlignment = 4; 4782 const int kReadAlignmentMask = kReadAlignment - 1; 4783 // Ensure that reading an entire aligned word containing the last character 4784 // of a string will not read outside the allocated area (because we pad up 4785 // to kObjectAlignment). 4786 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); 4787 // Assumes word reads and writes are little endian. 4788 // Nothing to do for zero characters. 4789 Label done; 4790 if (!ascii) { 4791 __ add(count, count, Operand(count), SetCC); 4792 } else { 4793 __ cmp(count, Operand::Zero()); 4794 } 4795 __ b(eq, &done); 4796 4797 // Assume that you cannot read (or write) unaligned. 4798 Label byte_loop; 4799 // Must copy at least eight bytes, otherwise just do it one byte at a time. 4800 __ cmp(count, Operand(8)); 4801 __ add(count, dest, Operand(count)); 4802 Register limit = count; // Read until src equals this. 4803 __ b(lt, &byte_loop); 4804 4805 if (!dest_always_aligned) { 4806 // Align dest by byte copying. Copies between zero and three bytes. 
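// For illustration: scratch4 below receives dest & kReadAlignmentMask, so if
// dest is one byte past a word boundary three bytes are copied, two bytes if
// it is two past, and a single byte if it is three past, leaving dest word
// aligned in every case.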
    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
    Label dest_aligned;
    __ b(eq, &dest_aligned);
    __ cmp(scratch4, Operand(2));
    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
    __ bind(&dest_aligned);
  }

  Label simple_loop;

  __ sub(scratch4, dest, Operand(src));
  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
  __ b(eq, &simple_loop);
  // Shift register is number of bits in a source word that
  // must be combined with bits in the next source word in order
  // to create a destination word.

  // Complex loop for src/dst that are not aligned the same way.
  {
    Label loop;
    __ mov(scratch4, Operand(scratch4, LSL, 3));
    Register left_shift = scratch4;
    __ and_(src, src, Operand(~3));  // Round down to load previous word.
    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    // Store the "shift" most significant bits of scratch in the least
    // significant bits (i.e., shift down by (32-shift)).
    __ rsb(scratch2, left_shift, Operand(32));
    Register right_shift = scratch2;
    __ mov(scratch1, Operand(scratch1, LSR, right_shift));

    __ bind(&loop);
    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
    __ sub(scratch5, limit, Operand(dest));
    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
    // Loop if four or more bytes left to copy.
    // Compare to eight, because we did the subtract before increasing dst.
    __ sub(scratch5, scratch5, Operand(8), SetCC);
    __ b(ge, &loop);
  }
  // There is now between zero and three bytes left to copy (negative that
  // number is in scratch5), and between one and three bytes already read into
  // scratch1 (eight times that number in scratch4). We may have read past
  // the end of the string, but because objects are aligned, we have not read
  // past the end of the object.
  // Find the minimum of remaining characters to move and preloaded characters
  // and write those as bytes.
  __ add(scratch5, scratch5, Operand(4), SetCC);
  __ b(eq, &done);
  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
  // Move minimum of bytes read and bytes left to copy to scratch4.
  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
  // Between one and three (value in scratch5) characters already read into
  // scratch ready to write.
  __ cmp(scratch5, Operand(2));
  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
  // Copy any remaining bytes.
  __ b(&byte_loop);

  // Simple loop.
  // Copy words from src to dst, until less than four bytes left.
  // Both src and dest are word aligned.
  __ bind(&simple_loop);
  {
    Label loop;
    __ bind(&loop);
    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    __ sub(scratch3, limit, Operand(dest));
    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    // Compare to 8, not 4, because we do the subtraction before increasing
    // dest.
    __ cmp(scratch3, Operand(8));
    __ b(ge, &loop);
  }

  // Copy bytes from src to dst until dst hits limit.
  __ bind(&byte_loop);
  __ cmp(dest, Operand(limit));
  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
  __ b(ge, &done);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
  __ b(&byte_loop);

  __ bind(&done);
}


void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4,
                                                        Register scratch5,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits, as such strings have a
  // different hash algorithm. Don't try to look for these in the string table.
  Label not_array_index;
  __ sub(scratch, c1, Operand(static_cast<int>('0')));
  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
  __ b(hi, &not_array_index);
  __ sub(scratch, c2, Operand(static_cast<int>('0')));
  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));

  // If the check failed, combine both characters into a single halfword.
  // This is required by the contract of the method: code at the
  // not_found branch expects this combination in the c1 register.
  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
  __ b(ls, not_found);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  StringHelper::GenerateHashInit(masm, hash, c1);
  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
  StringHelper::GenerateHashGetHash(masm, hash);

  // Collect the two characters in a register.
  Register chars = c1;
  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash: hash of two character string.

  // Load the string table.
  // Load address of first element of the string table.
  Register string_table = c2;
  __ LoadRoot(string_table, Heap::kStringTableRootIndex);

  Register undefined = scratch4;
  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  // Calculate capacity mask from the string table capacity.
  Register mask = scratch2;
  __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
  __ mov(mask, Operand(mask, ASR, 1));
  __ sub(mask, mask, Operand(1));

  // Calculate untagged address of the first element of the string table.
  Register first_string_table_element = string_table;
  __ add(first_string_table_element, string_table,
         Operand(StringTable::kElementsStartOffset - kHeapObjectTag));

  // Registers
  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash: hash of two character string
  // mask: capacity mask
  // first_string_table_element: address of the first element of
  //                             the string table
  // undefined: the undefined object
  // scratch: -

  // Perform a number of probes in the string table.
  const int kProbes = 4;
  Label found_in_string_table;
  Label next_probe[kProbes];
  Register candidate = scratch5;  // Scratch register contains candidate.
  for (int i = 0; i < kProbes; i++) {
    // Calculate entry in string table.
    if (i > 0) {
      __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
    } else {
      __ mov(candidate, hash);
    }

    __ and_(candidate, candidate, Operand(mask));

    // Load the entry from the string table.
    STATIC_ASSERT(StringTable::kEntrySize == 1);
    __ ldr(candidate,
           MemOperand(first_string_table_element,
                      candidate,
                      LSL,
                      kPointerSizeLog2));

    // If the entry is undefined, no string with this hash can be found.
    Label is_string;
    __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
    __ b(ne, &is_string);

    __ cmp(undefined, candidate);
    __ b(eq, not_found);
    // Must be the hole (deleted entry).
    if (FLAG_debug_code) {
      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
      __ cmp(ip, candidate);
      __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
    }
    __ jmp(&next_probe[i]);

    __ bind(&is_string);

    // Check that the candidate is a non-external ASCII string. The instance
    // type is still in the scratch register from the CompareObjectType
    // operation.
    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);

    // If the length is not 2, the string is not a candidate.
    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
    __ cmp(scratch, Operand(Smi::FromInt(2)));
    __ b(ne, &next_probe[i]);

    // Check if the two characters match.
    // Assumes that word load is little endian.
    __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
    __ cmp(chars, scratch);
    __ b(eq, &found_in_string_table);
    __ bind(&next_probe[i]);
  }

  // No matching 2 character string found by probing.
  __ jmp(not_found);

  // Scratch register contains result when we fall through to here.
  Register result = candidate;
  __ bind(&found_in_string_table);
  __ Move(r0, result);
}


void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character) {
  // hash = seed + character;
  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
  // Untag smi seed and add the character.
5047 __ add(hash, character, Operand(hash, LSR, kSmiTagSize)); 5048 // hash += hash << 10; 5049 __ add(hash, hash, Operand(hash, LSL, 10)); 5050 // hash ^= hash >> 6; 5051 __ eor(hash, hash, Operand(hash, LSR, 6)); 5052 } 5053 5054 5055 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, 5056 Register hash, 5057 Register character) { 5058 // hash += character; 5059 __ add(hash, hash, Operand(character)); 5060 // hash += hash << 10; 5061 __ add(hash, hash, Operand(hash, LSL, 10)); 5062 // hash ^= hash >> 6; 5063 __ eor(hash, hash, Operand(hash, LSR, 6)); 5064 } 5065 5066 5067 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, 5068 Register hash) { 5069 // hash += hash << 3; 5070 __ add(hash, hash, Operand(hash, LSL, 3)); 5071 // hash ^= hash >> 11; 5072 __ eor(hash, hash, Operand(hash, LSR, 11)); 5073 // hash += hash << 15; 5074 __ add(hash, hash, Operand(hash, LSL, 15)); 5075 5076 __ and_(hash, hash, Operand(String::kHashBitMask), SetCC); 5077 5078 // if (hash == 0) hash = 27; 5079 __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq); 5080 } 5081 5082 5083 void SubStringStub::Generate(MacroAssembler* masm) { 5084 Label runtime; 5085 5086 // Stack frame on entry. 5087 // lr: return address 5088 // sp[0]: to 5089 // sp[4]: from 5090 // sp[8]: string 5091 5092 // This stub is called from the native-call %_SubString(...), so 5093 // nothing can be assumed about the arguments. It is tested that: 5094 // "string" is a sequential string, 5095 // both "from" and "to" are smis, and 5096 // 0 <= from <= to <= string.length. 5097 // If any of these assumptions fail, we call the runtime system. 5098 5099 const int kToOffset = 0 * kPointerSize; 5100 const int kFromOffset = 1 * kPointerSize; 5101 const int kStringOffset = 2 * kPointerSize; 5102 5103 __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); 5104 STATIC_ASSERT(kFromOffset == kToOffset + 4); 5105 STATIC_ASSERT(kSmiTag == 0); 5106 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 5107 5108 // Arithmetic shift right by one un-smi-tags. In this case we rotate right 5109 // instead because we bail out on non-smi values: ROR and ASR are equivalent 5110 // for smis but they set the flags in a way that's easier to optimize. 5111 __ mov(r2, Operand(r2, ROR, 1), SetCC); 5112 __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); 5113 // If either to or from had the smi tag bit set, then C is set now, and N 5114 // has the same value: we rotated by 1, so the bottom bit is now the top bit. 5115 // We want to bailout to runtime here if From is negative. In that case, the 5116 // next instruction is not executed and we fall through to bailing out to 5117 // runtime. 5118 // Executed if both r2 and r3 are untagged integers. 5119 __ sub(r2, r2, Operand(r3), SetCC, cc); 5120 // One of the above un-smis or the above SUB could have set N==1. 5121 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. 5122 5123 // Make sure first argument is a string. 5124 __ ldr(r0, MemOperand(sp, kStringOffset)); 5125 // Do a JumpIfSmi, but fold its jump into the subsequent string test. 5126 __ SmiTst(r0); 5127 Condition is_string = masm->IsObjectStringType(r0, r1, ne); 5128 ASSERT(is_string == eq); 5129 __ b(NegateCondition(is_string), &runtime); 5130 5131 Label single_char; 5132 __ cmp(r2, Operand(1)); 5133 __ b(eq, &single_char); 5134 5135 // Short-cut for the case of trivial substring. 
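  // (If the requested length equals the original length then, given
  // 0 <= from <= to <= string.length, from must be 0 and to must be the whole
  // length, so the original string can be returned unchanged.)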
  Label return_r0;
  // r0: original string
  // r2: result string length
  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
  __ cmp(r2, Operand(r4, ASR, 1));
  // Return original string.
  __ b(eq, &return_r0);
  // Longer than original string's length or negative: unsafe arguments.
  __ b(hi, &runtime);
  // Shorter than original string's length: an actual substring.

  // Deal with different string types: update the index if necessary
  // and put the underlying string into r5.
  // r0: original string
  // r1: instance type
  // r2: length
  // r3: from index (untagged)
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);
  __ tst(r1, Operand(kIsIndirectStringMask));
  __ b(eq, &seq_or_external_string);

  __ tst(r1, Operand(kSlicedNotConsMask));
  __ b(ne, &sliced_string);
  // Cons string. Check whether it is flat, then fetch first part.
  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
  __ CompareRoot(r5, Heap::kempty_stringRootIndex);
  __ b(ne, &runtime);
  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
  // Update instance type.
  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&sliced_string);
  // Sliced string. Fetch parent and correct start index by offset.
  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
  // Update instance type.
  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&seq_or_external_string);
  // Sequential or external string. Just move string to the expected register.
  __ mov(r5, r0);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // r5: underlying subject string
    // r1: instance type of underlying subject string
    // r2: length
    // r3: adjusted start index (untagged)
    __ cmp(r2, Operand(SlicedString::kMinLength));
    // Short slice. Copy instead of slicing.
    __ b(lt, &copy_routine);
    // Allocate new sliced string. At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string. It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyway due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ tst(r1, Operand(kStringEncodingMask));
    __ b(eq, &two_byte_slice);
    __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
    __ jmp(&set_slice_header);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
    __ bind(&set_slice_header);
    __ mov(r3, Operand(r3, LSL, 1));
    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
    __ jmp(&return_r0);

    __ bind(&copy_routine);
  }

  // r5: underlying subject string
  // r1: instance type of underlying subject string
  // r2: length
  // r3: adjusted start index (untagged)
  Label two_byte_sequential, sequential_string, allocate_result;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r1, Operand(kExternalStringTag));
  __ b(eq, &sequential_string);

  // Handle external string.
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ tst(r1, Operand(kShortExternalStringTag));
  __ b(ne, &runtime);
  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
  // r5 already points to the first character of underlying string.
  __ jmp(&allocate_result);

  __ bind(&sequential_string);
  // Locate first character of underlying subject string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  __ bind(&allocate_result);
  // Sequential string. Allocate the result.
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ tst(r1, Operand(kStringEncodingMask));
  __ b(eq, &two_byte_sequential);

  // Allocate and copy the resulting ASCII string.
  __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);

  // Locate first character of substring to copy.
  __ add(r5, r5, r3);
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string
  // r1: first character of result string
  // r2: result string length
  // r5: first character of substring to copy
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
  __ jmp(&return_r0);

  // Allocate and copy the resulting two-byte string.
  __ bind(&two_byte_sequential);
  __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);

  // Locate first character of substring to copy.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ add(r5, r5, Operand(r3, LSL, 1));
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of substring to copy.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(
      masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);

  __ bind(&return_r0);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ Drop(3);
  __ Ret();

  // Just jump to runtime to create the substring.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);

  __ bind(&single_char);
  // r0: original string
  // r1: instance type
  // r2: length
  // r3: from index (untagged)
  __ SmiTag(r3, r3);
  StringCharAtGenerator generator(
      r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
  generator.GenerateFast(masm);
  __ Drop(3);
  __ Ret();
  generator.SkipSlow(masm, &runtime);
}


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2,
                                                      Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ cmp(length, scratch2);
  __ b(eq, &check_zero_length);
  __ bind(&strings_not_equal);
  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
  __ Ret();

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(length, Operand::Zero());
  __ b(ne, &compare_chars);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  // Compare characters.
  __ bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, length, scratch2, scratch3,
                                &strings_not_equal);

  // Characters are equal.
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
  Register length_delta = scratch3;
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(min_length, Operand::Zero());
  __ b(eq, &compare_lengths);

  // Compare loop.
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, min_length, scratch2, scratch4,
                                &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(r0, Operand(length_delta), SetCC);
  __ bind(&result_not_equal);
  // Conditionally update the result based either on length_delta or
  // the last comparison performed in the loop above.
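  // On the fall-through path the flags come from the SetCC mov of
  // length_delta; on the result_not_equal path they come from the last byte
  // compare in the loop. In both cases gt selects GREATER and lt selects
  // LESS, while on eq r0 already holds length_delta, which is Smi zero.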
  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
  __ Ret();
}


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch1,
    Register scratch2,
    Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ add(scratch1, length,
         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch1));
  __ add(right, right, Operand(scratch1));
  __ rsb(length, length, Operand::Zero());
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ ldrb(scratch1, MemOperand(left, index));
  __ ldrb(scratch2, MemOperand(right, index));
  __ cmp(scratch1, scratch2);
  __ b(ne, chars_not_equal);
  __ add(index, index, Operand(1), SetCC);
  __ b(ne, &loop);
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  Counters* counters = masm->isolate()->counters();

  // Stack frame on entry.
  // sp[0]: right string
  // sp[4]: left string
  __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1.

  Label not_same;
  __ cmp(r0, r1);
  __ b(ne, &not_same);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);

  // Compare flat ASCII strings natively. Remove arguments from stack first.
  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label call_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  Counters* counters = masm->isolate()->counters();

  // Stack on entry:
  // sp[0]: second argument (right).
  // sp[4]: first argument (left).

  // Load the two arguments.
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  // Otherwise, at least one of the arguments is definitely a string,
  // and we convert the one that is not known to be a string.
  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
    __ JumpIfEitherSmi(r0, r1, &call_runtime);
    // Load instance types.
5477 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5478 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5479 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5480 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5481 STATIC_ASSERT(kStringTag == 0); 5482 // If either is not a string, go to runtime. 5483 __ tst(r4, Operand(kIsNotStringMask)); 5484 __ tst(r5, Operand(kIsNotStringMask), eq); 5485 __ b(ne, &call_runtime); 5486 } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) { 5487 ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0); 5488 GenerateConvertArgument( 5489 masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); 5490 builtin_id = Builtins::STRING_ADD_RIGHT; 5491 } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) { 5492 ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0); 5493 GenerateConvertArgument( 5494 masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); 5495 builtin_id = Builtins::STRING_ADD_LEFT; 5496 } 5497 5498 // Both arguments are strings. 5499 // r0: first string 5500 // r1: second string 5501 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5502 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5503 { 5504 Label strings_not_empty; 5505 // Check if either of the strings are empty. In that case return the other. 5506 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); 5507 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); 5508 STATIC_ASSERT(kSmiTag == 0); 5509 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. 5510 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. 5511 STATIC_ASSERT(kSmiTag == 0); 5512 // Else test if second string is empty. 5513 __ cmp(r3, Operand(Smi::FromInt(0)), ne); 5514 __ b(ne, &strings_not_empty); // If either string was empty, return r0. 5515 5516 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5517 __ add(sp, sp, Operand(2 * kPointerSize)); 5518 __ Ret(); 5519 5520 __ bind(&strings_not_empty); 5521 } 5522 5523 __ SmiUntag(r2); 5524 __ SmiUntag(r3); 5525 // Both strings are non-empty. 5526 // r0: first string 5527 // r1: second string 5528 // r2: length of first string 5529 // r3: length of second string 5530 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5531 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5532 // Look at the length of the result of adding the two strings. 5533 Label string_add_flat_result, longer_than_two; 5534 // Adding two lengths can't overflow. 5535 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); 5536 __ add(r6, r2, Operand(r3)); 5537 // Use the string table when adding two one character strings, as it 5538 // helps later optimizations to return a string here. 5539 __ cmp(r6, Operand(2)); 5540 __ b(ne, &longer_than_two); 5541 5542 // Check that both strings are non-external ASCII strings. 5543 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5544 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5545 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5546 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5547 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5548 } 5549 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, 5550 &call_runtime); 5551 5552 // Get the two characters forming the sub string. 
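  // (Both strings are known to be sequential ASCII and non-empty, and their
  // combined length is 2, so each has length 1 and its character is the first
  // byte of the string payload, right after the header.)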
5553 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); 5554 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); 5555 5556 // Try to lookup two character string in string table. If it is not found 5557 // just allocate a new one. 5558 Label make_two_character_string; 5559 StringHelper::GenerateTwoCharacterStringTableProbe( 5560 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); 5561 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5562 __ add(sp, sp, Operand(2 * kPointerSize)); 5563 __ Ret(); 5564 5565 __ bind(&make_two_character_string); 5566 // Resulting string has length 2 and first chars of two strings 5567 // are combined into single halfword in r2 register. 5568 // So we can fill resulting string without two loops by a single 5569 // halfword store instruction (which assumes that processor is 5570 // in a little endian mode) 5571 __ mov(r6, Operand(2)); 5572 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); 5573 __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); 5574 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5575 __ add(sp, sp, Operand(2 * kPointerSize)); 5576 __ Ret(); 5577 5578 __ bind(&longer_than_two); 5579 // Check if resulting string will be flat. 5580 __ cmp(r6, Operand(ConsString::kMinLength)); 5581 __ b(lt, &string_add_flat_result); 5582 // Handle exceptionally long strings in the runtime system. 5583 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); 5584 ASSERT(IsPowerOf2(String::kMaxLength + 1)); 5585 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. 5586 __ cmp(r6, Operand(String::kMaxLength + 1)); 5587 __ b(hs, &call_runtime); 5588 5589 // If result is not supposed to be flat, allocate a cons string object. 5590 // If both strings are ASCII the result is an ASCII cons string. 5591 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5592 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5593 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5594 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5595 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5596 } 5597 Label non_ascii, allocated, ascii_data; 5598 STATIC_ASSERT(kTwoByteStringTag == 0); 5599 __ tst(r4, Operand(kStringEncodingMask)); 5600 __ tst(r5, Operand(kStringEncodingMask), ne); 5601 __ b(eq, &non_ascii); 5602 5603 // Allocate an ASCII cons string. 5604 __ bind(&ascii_data); 5605 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); 5606 __ bind(&allocated); 5607 // Fill the fields of the cons string. 
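  // The cons string was allocated just above. Normally a freshly allocated
  // new-space object needs no write barrier, but when the new-space "high
  // promotion mode" is active, allocations are presumably made directly in
  // old space, so in that case the stores below go through RecordWriteField.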
5608 Label skip_write_barrier, after_writing; 5609 ExternalReference high_promotion_mode = ExternalReference:: 5610 new_space_high_promotion_mode_active_address(masm->isolate()); 5611 __ mov(r4, Operand(high_promotion_mode)); 5612 __ ldr(r4, MemOperand(r4, 0)); 5613 __ cmp(r4, Operand::Zero()); 5614 __ b(eq, &skip_write_barrier); 5615 5616 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5617 __ RecordWriteField(r7, 5618 ConsString::kFirstOffset, 5619 r0, 5620 r4, 5621 kLRHasNotBeenSaved, 5622 kDontSaveFPRegs); 5623 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5624 __ RecordWriteField(r7, 5625 ConsString::kSecondOffset, 5626 r1, 5627 r4, 5628 kLRHasNotBeenSaved, 5629 kDontSaveFPRegs); 5630 __ jmp(&after_writing); 5631 5632 __ bind(&skip_write_barrier); 5633 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5634 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5635 5636 __ bind(&after_writing); 5637 5638 __ mov(r0, Operand(r7)); 5639 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5640 __ add(sp, sp, Operand(2 * kPointerSize)); 5641 __ Ret(); 5642 5643 __ bind(&non_ascii); 5644 // At least one of the strings is two-byte. Check whether it happens 5645 // to contain only one byte characters. 5646 // r4: first instance type. 5647 // r5: second instance type. 5648 __ tst(r4, Operand(kOneByteDataHintMask)); 5649 __ tst(r5, Operand(kOneByteDataHintMask), ne); 5650 __ b(ne, &ascii_data); 5651 __ eor(r4, r4, Operand(r5)); 5652 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); 5653 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5654 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5655 __ b(eq, &ascii_data); 5656 5657 // Allocate a two byte cons string. 5658 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); 5659 __ jmp(&allocated); 5660 5661 // We cannot encounter sliced strings or cons strings here since: 5662 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); 5663 // Handle creating a flat result from either external or sequential strings. 5664 // Locate the first characters' locations. 5665 // r0: first string 5666 // r1: second string 5667 // r2: length of first string 5668 // r3: length of second string 5669 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5670 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5671 // r6: sum of lengths. 5672 Label first_prepared, second_prepared; 5673 __ bind(&string_add_flat_result); 5674 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5675 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5676 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5677 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5678 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5679 } 5680 5681 // Check whether both strings have same encoding 5682 __ eor(r7, r4, Operand(r5)); 5683 __ tst(r7, Operand(kStringEncodingMask)); 5684 __ b(ne, &call_runtime); 5685 5686 STATIC_ASSERT(kSeqStringTag == 0); 5687 __ tst(r4, Operand(kStringRepresentationMask)); 5688 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5689 __ add(r7, 5690 r0, 5691 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5692 LeaveCC, 5693 eq); 5694 __ b(eq, &first_prepared); 5695 // External string: rule out short external string and load string resource. 
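  // (Short external strings do not cache a pointer to their character data in
  // the object, so they cannot be handled by the flat copy below and are
  // handed off to the runtime instead.)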
5696 STATIC_ASSERT(kShortExternalStringTag != 0); 5697 __ tst(r4, Operand(kShortExternalStringMask)); 5698 __ b(ne, &call_runtime); 5699 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); 5700 __ bind(&first_prepared); 5701 5702 STATIC_ASSERT(kSeqStringTag == 0); 5703 __ tst(r5, Operand(kStringRepresentationMask)); 5704 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5705 __ add(r1, 5706 r1, 5707 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5708 LeaveCC, 5709 eq); 5710 __ b(eq, &second_prepared); 5711 // External string: rule out short external string and load string resource. 5712 STATIC_ASSERT(kShortExternalStringTag != 0); 5713 __ tst(r5, Operand(kShortExternalStringMask)); 5714 __ b(ne, &call_runtime); 5715 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); 5716 __ bind(&second_prepared); 5717 5718 Label non_ascii_string_add_flat_result; 5719 // r7: first character of first string 5720 // r1: first character of second string 5721 // r2: length of first string. 5722 // r3: length of second string. 5723 // r6: sum of lengths. 5724 // Both strings have the same encoding. 5725 STATIC_ASSERT(kTwoByteStringTag == 0); 5726 __ tst(r5, Operand(kStringEncodingMask)); 5727 __ b(eq, &non_ascii_string_add_flat_result); 5728 5729 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); 5730 __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5731 // r0: result string. 5732 // r7: first character of first string. 5733 // r1: first character of second string. 5734 // r2: length of first string. 5735 // r3: length of second string. 5736 // r6: first character of result. 5737 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); 5738 // r6: next character of result. 5739 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); 5740 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5741 __ add(sp, sp, Operand(2 * kPointerSize)); 5742 __ Ret(); 5743 5744 __ bind(&non_ascii_string_add_flat_result); 5745 __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); 5746 __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5747 // r0: result string. 5748 // r7: first character of first string. 5749 // r1: first character of second string. 5750 // r2: length of first string. 5751 // r3: length of second string. 5752 // r6: first character of result. 5753 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); 5754 // r6: next character of result. 5755 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); 5756 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5757 __ add(sp, sp, Operand(2 * kPointerSize)); 5758 __ Ret(); 5759 5760 // Just jump to runtime to add the two strings. 
  __ bind(&call_runtime);
  if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
    GenerateRegisterArgsPop(masm);
    // Build a frame.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      GenerateRegisterArgsPush(masm);
      __ CallRuntime(Runtime::kStringAdd, 2);
    }
    __ Ret();
  } else {
    __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
  }

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
      GenerateRegisterArgsPop(masm);
      // Build a frame.
      {
        FrameScope scope(masm, StackFrame::INTERNAL);
        GenerateRegisterArgsPush(masm);
        __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
      }
      __ Ret();
    } else {
      __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
    }
  }
}


void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ push(r0);
  __ push(r1);
}


void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
  __ pop(r1);
  __ pop(r0);
}


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Register scratch4,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
  __ b(lt, &done);

  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      scratch4,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ str(arg, MemOperand(sp, stack_offset));
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ CompareObjectType(
      arg, scratch1, scratch2, JS_VALUE_TYPE);  // map -> scratch1.
  __ b(ne, slow);
  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
  __ and_(scratch2,
          scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
  __ cmp(scratch2,
         Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
  __ b(ne, slow);
  __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
  __ str(arg, MemOperand(sp, stack_offset));

  __ bind(&done);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMI);
  Label miss;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ sub(r0, r0, r1, SetCC);
  } else {
    // Untag before subtracting to avoid handling overflow.
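    // (Smi payloads are 31-bit, so the difference of two untagged smis always
    // fits in a 32-bit register; its sign is all the ordered comparison
    // needs.)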
5864 __ SmiUntag(r1); 5865 __ sub(r0, r1, Operand::SmiUntag(r0)); 5866 } 5867 __ Ret(); 5868 5869 __ bind(&miss); 5870 GenerateMiss(masm); 5871 } 5872 5873 5874 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { 5875 ASSERT(state_ == CompareIC::NUMBER); 5876 5877 Label generic_stub; 5878 Label unordered, maybe_undefined1, maybe_undefined2; 5879 Label miss; 5880 5881 if (left_ == CompareIC::SMI) { 5882 __ JumpIfNotSmi(r1, &miss); 5883 } 5884 if (right_ == CompareIC::SMI) { 5885 __ JumpIfNotSmi(r0, &miss); 5886 } 5887 5888 // Inlining the double comparison and falling back to the general compare 5889 // stub if NaN is involved. 5890 // Load left and right operand. 5891 Label done, left, left_smi, right_smi; 5892 __ JumpIfSmi(r0, &right_smi); 5893 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, 5894 DONT_DO_SMI_CHECK); 5895 __ sub(r2, r0, Operand(kHeapObjectTag)); 5896 __ vldr(d1, r2, HeapNumber::kValueOffset); 5897 __ b(&left); 5898 __ bind(&right_smi); 5899 __ SmiToDouble(d1, r0); 5900 5901 __ bind(&left); 5902 __ JumpIfSmi(r1, &left_smi); 5903 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, 5904 DONT_DO_SMI_CHECK); 5905 __ sub(r2, r1, Operand(kHeapObjectTag)); 5906 __ vldr(d0, r2, HeapNumber::kValueOffset); 5907 __ b(&done); 5908 __ bind(&left_smi); 5909 __ SmiToDouble(d0, r1); 5910 5911 __ bind(&done); 5912 // Compare operands. 5913 __ VFPCompareAndSetFlags(d0, d1); 5914 5915 // Don't base result on status bits when a NaN is involved. 5916 __ b(vs, &unordered); 5917 5918 // Return a result of -1, 0, or 1, based on status bits. 5919 __ mov(r0, Operand(EQUAL), LeaveCC, eq); 5920 __ mov(r0, Operand(LESS), LeaveCC, lt); 5921 __ mov(r0, Operand(GREATER), LeaveCC, gt); 5922 __ Ret(); 5923 5924 __ bind(&unordered); 5925 __ bind(&generic_stub); 5926 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, 5927 CompareIC::GENERIC); 5928 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); 5929 5930 __ bind(&maybe_undefined1); 5931 if (Token::IsOrderedRelationalCompareOp(op_)) { 5932 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); 5933 __ b(ne, &miss); 5934 __ JumpIfSmi(r1, &unordered); 5935 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); 5936 __ b(ne, &maybe_undefined2); 5937 __ jmp(&unordered); 5938 } 5939 5940 __ bind(&maybe_undefined2); 5941 if (Token::IsOrderedRelationalCompareOp(op_)) { 5942 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); 5943 __ b(eq, &unordered); 5944 } 5945 5946 __ bind(&miss); 5947 GenerateMiss(masm); 5948 } 5949 5950 5951 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { 5952 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); 5953 Label miss; 5954 5955 // Registers containing left and right operands respectively. 5956 Register left = r1; 5957 Register right = r0; 5958 Register tmp1 = r2; 5959 Register tmp2 = r3; 5960 5961 // Check that both operands are heap objects. 5962 __ JumpIfEitherSmi(left, right, &miss); 5963 5964 // Check that both operands are internalized strings. 
5965 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); 5966 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); 5967 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); 5968 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); 5969 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); 5970 __ orr(tmp1, tmp1, Operand(tmp2)); 5971 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); 5972 __ b(ne, &miss); 5973 5974 // Internalized strings are compared by identity. 5975 __ cmp(left, right); 5976 // Make sure r0 is non-zero. At this point input operands are 5977 // guaranteed to be non-zero. 5978 ASSERT(right.is(r0)); 5979 STATIC_ASSERT(EQUAL == 0); 5980 STATIC_ASSERT(kSmiTag == 0); 5981 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); 5982 __ Ret(); 5983 5984 __ bind(&miss); 5985 GenerateMiss(masm); 5986 } 5987 5988 5989 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { 5990 ASSERT(state_ == CompareIC::UNIQUE_NAME); 5991 ASSERT(GetCondition() == eq); 5992 Label miss; 5993 5994 // Registers containing left and right operands respectively. 5995 Register left = r1; 5996 Register right = r0; 5997 Register tmp1 = r2; 5998 Register tmp2 = r3; 5999 6000 // Check that both operands are heap objects. 6001 __ JumpIfEitherSmi(left, right, &miss); 6002 6003 // Check that both operands are unique names. This leaves the instance 6004 // types loaded in tmp1 and tmp2. 6005 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); 6006 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); 6007 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); 6008 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); 6009 6010 __ JumpIfNotUniqueName(tmp1, &miss); 6011 __ JumpIfNotUniqueName(tmp2, &miss); 6012 6013 // Unique names are compared by identity. 6014 __ cmp(left, right); 6015 // Make sure r0 is non-zero. At this point input operands are 6016 // guaranteed to be non-zero. 6017 ASSERT(right.is(r0)); 6018 STATIC_ASSERT(EQUAL == 0); 6019 STATIC_ASSERT(kSmiTag == 0); 6020 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); 6021 __ Ret(); 6022 6023 __ bind(&miss); 6024 GenerateMiss(masm); 6025 } 6026 6027 6028 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { 6029 ASSERT(state_ == CompareIC::STRING); 6030 Label miss; 6031 6032 bool equality = Token::IsEqualityOp(op_); 6033 6034 // Registers containing left and right operands respectively. 6035 Register left = r1; 6036 Register right = r0; 6037 Register tmp1 = r2; 6038 Register tmp2 = r3; 6039 Register tmp3 = r4; 6040 Register tmp4 = r5; 6041 6042 // Check that both operands are heap objects. 6043 __ JumpIfEitherSmi(left, right, &miss); 6044 6045 // Check that both operands are strings. This leaves the instance 6046 // types loaded in tmp1 and tmp2. 6047 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); 6048 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); 6049 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); 6050 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); 6051 STATIC_ASSERT(kNotStringTag != 0); 6052 __ orr(tmp3, tmp1, tmp2); 6053 __ tst(tmp3, Operand(kIsNotStringMask)); 6054 __ b(ne, &miss); 6055 6056 // Fast check for identical strings. 6057 __ cmp(left, right); 6058 STATIC_ASSERT(EQUAL == 0); 6059 STATIC_ASSERT(kSmiTag == 0); 6060 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); 6061 __ Ret(eq); 6062 6063 // Handle not identical strings. 
6064 6065 // Check that both strings are internalized strings. If they are, we're done 6066 // because we already know they are not identical. We know they are both 6067 // strings. 6068 if (equality) { 6069 ASSERT(GetCondition() == eq); 6070 STATIC_ASSERT(kInternalizedTag == 0); 6071 __ orr(tmp3, tmp1, Operand(tmp2)); 6072 __ tst(tmp3, Operand(kIsNotInternalizedMask)); 6073 // Make sure r0 is non-zero. At this point input operands are 6074 // guaranteed to be non-zero. 6075 ASSERT(right.is(r0)); 6076 __ Ret(eq); 6077 } 6078 6079 // Check that both strings are sequential ASCII. 6080 Label runtime; 6081 __ JumpIfBothInstanceTypesAreNotSequentialAscii( 6082 tmp1, tmp2, tmp3, tmp4, &runtime); 6083 6084 // Compare flat ASCII strings. Returns when done. 6085 if (equality) { 6086 StringCompareStub::GenerateFlatAsciiStringEquals( 6087 masm, left, right, tmp1, tmp2, tmp3); 6088 } else { 6089 StringCompareStub::GenerateCompareFlatAsciiStrings( 6090 masm, left, right, tmp1, tmp2, tmp3, tmp4); 6091 } 6092 6093 // Handle more complex cases in runtime. 6094 __ bind(&runtime); 6095 __ Push(left, right); 6096 if (equality) { 6097 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); 6098 } else { 6099 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); 6100 } 6101 6102 __ bind(&miss); 6103 GenerateMiss(masm); 6104 } 6105 6106 6107 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { 6108 ASSERT(state_ == CompareIC::OBJECT); 6109 Label miss; 6110 __ and_(r2, r1, Operand(r0)); 6111 __ JumpIfSmi(r2, &miss); 6112 6113 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); 6114 __ b(ne, &miss); 6115 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); 6116 __ b(ne, &miss); 6117 6118 ASSERT(GetCondition() == eq); 6119 __ sub(r0, r0, Operand(r1)); 6120 __ Ret(); 6121 6122 __ bind(&miss); 6123 GenerateMiss(masm); 6124 } 6125 6126 6127 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { 6128 Label miss; 6129 __ and_(r2, r1, Operand(r0)); 6130 __ JumpIfSmi(r2, &miss); 6131 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); 6132 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); 6133 __ cmp(r2, Operand(known_map_)); 6134 __ b(ne, &miss); 6135 __ cmp(r3, Operand(known_map_)); 6136 __ b(ne, &miss); 6137 6138 __ sub(r0, r0, Operand(r1)); 6139 __ Ret(); 6140 6141 __ bind(&miss); 6142 GenerateMiss(masm); 6143 } 6144 6145 6146 6147 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { 6148 { 6149 // Call the runtime system in a fresh internal frame. 6150 ExternalReference miss = 6151 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); 6152 6153 FrameScope scope(masm, StackFrame::INTERNAL); 6154 __ Push(r1, r0); 6155 __ push(lr); 6156 __ Push(r1, r0); 6157 __ mov(ip, Operand(Smi::FromInt(op_))); 6158 __ push(ip); 6159 __ CallExternalReference(miss, 3); 6160 // Compute the entry point of the rewritten stub. 6161 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); 6162 // Restore registers. 6163 __ pop(lr); 6164 __ pop(r0); 6165 __ pop(r1); 6166 } 6167 6168 __ Jump(r2); 6169 } 6170 6171 6172 void DirectCEntryStub::Generate(MacroAssembler* masm) { 6173 __ ldr(pc, MemOperand(sp, 0)); 6174 } 6175 6176 6177 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, 6178 Register target) { 6179 intptr_t code = 6180 reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); 6181 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET)); 6182 6183 // Prevent literal pool emission during calculation of return address. 
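  // (The add below hard-codes the distance from the current pc to the
  // instruction following the jump; an interleaved constant pool would change
  // that distance and corrupt the saved return address. The ASSERT_EQ after
  // the jump double-checks the assumed code size.)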
6184 Assembler::BlockConstPoolScope block_const_pool(masm); 6185 6186 // Push return address (accessible to GC through exit frame pc). 6187 // Note that using pc with str is deprecated. 6188 Label start; 6189 __ bind(&start); 6190 __ add(ip, pc, Operand(Assembler::kInstrSize)); 6191 __ str(ip, MemOperand(sp, 0)); 6192 __ Jump(target); // Call the C++ function. 6193 ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta, 6194 masm->SizeOfCodeGeneratedSince(&start)); 6195 __ VFPEnsureFPSCRState(r2); 6196 } 6197 6198 6199 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, 6200 Label* miss, 6201 Label* done, 6202 Register receiver, 6203 Register properties, 6204 Handle<Name> name, 6205 Register scratch0) { 6206 ASSERT(name->IsUniqueName()); 6207 // If names of slots in range from 1 to kProbes - 1 for the hash value are 6208 // not equal to the name and kProbes-th slot is not used (its name is the 6209 // undefined value), it guarantees the hash table doesn't contain the 6210 // property. It's true even if some slots represent deleted properties 6211 // (their names are the hole value). 6212 for (int i = 0; i < kInlinedProbes; i++) { 6213 // scratch0 points to properties hash. 6214 // Compute the masked index: (hash + i + i * i) & mask. 6215 Register index = scratch0; 6216 // Capacity is smi 2^n. 6217 __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); 6218 __ sub(index, index, Operand(1)); 6219 __ and_(index, index, Operand( 6220 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); 6221 6222 // Scale the index by multiplying by the entry size. 6223 ASSERT(NameDictionary::kEntrySize == 3); 6224 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. 6225 6226 Register entity_name = scratch0; 6227 // Having undefined at this place means the name is not contained. 6228 ASSERT_EQ(kSmiTagSize, 1); 6229 Register tmp = properties; 6230 __ add(tmp, properties, Operand(index, LSL, 1)); 6231 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); 6232 6233 ASSERT(!tmp.is(entity_name)); 6234 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); 6235 __ cmp(entity_name, tmp); 6236 __ b(eq, done); 6237 6238 // Load the hole ready for use below: 6239 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); 6240 6241 // Stop if found the property. 6242 __ cmp(entity_name, Operand(Handle<Name>(name))); 6243 __ b(eq, miss); 6244 6245 Label good; 6246 __ cmp(entity_name, tmp); 6247 __ b(eq, &good); 6248 6249 // Check if the entry name is not a unique name. 6250 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); 6251 __ ldrb(entity_name, 6252 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); 6253 __ JumpIfNotUniqueName(entity_name, miss); 6254 __ bind(&good); 6255 6256 // Restore the properties. 6257 __ ldr(properties, 6258 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 6259 } 6260 6261 const int spill_mask = 6262 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | 6263 r2.bit() | r1.bit() | r0.bit()); 6264 6265 __ stm(db_w, sp, spill_mask); 6266 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 6267 __ mov(r1, Operand(Handle<Name>(name))); 6268 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); 6269 __ CallStub(&stub); 6270 __ cmp(r0, Operand::Zero()); 6271 __ ldm(ia_w, sp, spill_mask); 6272 6273 __ b(eq, done); 6274 __ b(ne, miss); 6275 } 6276 6277 6278 // Probe the name dictionary in the |elements| register. Jump to the 6279 // |done| label if a property with the given name is found. 
Jump to 6280 // the |miss| label otherwise. 6281 // If lookup was successful |scratch2| will be equal to elements + 4 * index. 6282 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, 6283 Label* miss, 6284 Label* done, 6285 Register elements, 6286 Register name, 6287 Register scratch1, 6288 Register scratch2) { 6289 ASSERT(!elements.is(scratch1)); 6290 ASSERT(!elements.is(scratch2)); 6291 ASSERT(!name.is(scratch1)); 6292 ASSERT(!name.is(scratch2)); 6293 6294 __ AssertName(name); 6295 6296 // Compute the capacity mask. 6297 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); 6298 __ SmiUntag(scratch1); 6299 __ sub(scratch1, scratch1, Operand(1)); 6300 6301 // Generate an unrolled loop that performs a few probes before 6302 // giving up. Measurements done on Gmail indicate that 2 probes 6303 // cover ~93% of loads from dictionaries. 6304 for (int i = 0; i < kInlinedProbes; i++) { 6305 // Compute the masked index: (hash + i + i * i) & mask. 6306 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); 6307 if (i > 0) { 6308 // Add the probe offset (i + i * i) left shifted to avoid right shifting 6309 // the hash in a separate instruction. The value hash + i + i * i is right 6310 // shifted in the following and instruction. 6311 ASSERT(NameDictionary::GetProbeOffset(i) < 6312 1 << (32 - Name::kHashFieldOffset)); 6313 __ add(scratch2, scratch2, Operand( 6314 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); 6315 } 6316 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); 6317 6318 // Scale the index by multiplying by the element size. 6319 ASSERT(NameDictionary::kEntrySize == 3); 6320 // scratch2 = scratch2 * 3. 6321 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); 6322 6323 // Check if the key is identical to the name. 6324 __ add(scratch2, elements, Operand(scratch2, LSL, 2)); 6325 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); 6326 __ cmp(name, Operand(ip)); 6327 __ b(eq, done); 6328 } 6329 6330 const int spill_mask = 6331 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | 6332 r3.bit() | r2.bit() | r1.bit() | r0.bit()) & 6333 ~(scratch1.bit() | scratch2.bit()); 6334 6335 __ stm(db_w, sp, spill_mask); 6336 if (name.is(r0)) { 6337 ASSERT(!elements.is(r1)); 6338 __ Move(r1, name); 6339 __ Move(r0, elements); 6340 } else { 6341 __ Move(r0, elements); 6342 __ Move(r1, name); 6343 } 6344 NameDictionaryLookupStub stub(POSITIVE_LOOKUP); 6345 __ CallStub(&stub); 6346 __ cmp(r0, Operand::Zero()); 6347 __ mov(scratch2, Operand(r2)); 6348 __ ldm(ia_w, sp, spill_mask); 6349 6350 __ b(ne, done); 6351 __ b(eq, miss); 6352 } 6353 6354 6355 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { 6356 // This stub overrides SometimesSetsUpAFrame() to return false. That means 6357 // we cannot call anything that could cause a GC from this stub. 6358 // Registers: 6359 // result: NameDictionary to probe 6360 // r1: key 6361 // dictionary: NameDictionary to probe. 6362 // index: will hold an index of entry if lookup is successful. 6363 // might alias with result_. 6364 // Returns: 6365 // result_ is zero if lookup failed, non zero otherwise. 
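  // Note that |result| and |dictionary| intentionally alias r0: the
  // dictionary register is consumed by the probe loop below, and r0 is then
  // reused for the return value.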

  Register result = r0;
  Register dictionary = r0;
  Register key = r1;
  Register index = r2;
  Register mask = r3;
  Register hash = r4;
  Register undefined = r5;
  Register entry_key = r6;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(mask, mask, Operand(1));

  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following AND instruction.
      ASSERT(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ add(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, Operand(hash));
    }
    __ and_(index, mask, Operand(index, LSR, Name::kHashShift));

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.

    ASSERT_EQ(kSmiTagSize, 1);
    __ add(index, dictionary, Operand(index, LSL, 2));
    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ cmp(entry_key, Operand(undefined));
    __ b(eq, &not_in_dictionary);

    // Stop if we found the property.
    __ cmp(entry_key, Operand(key));
    __ b(eq, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ ldrb(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result, Operand::Zero());
    __ Ret();
  }

  __ bind(&in_dictionary);
  __ mov(result, Operand(1));
  __ Ret();

  __ bind(&not_in_dictionary);
  __ mov(result, Operand::Zero());
  __ Ret();
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
  // Also used in KeyedStoreIC::GenerateGeneric.
  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal.
  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
6464 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, 6465 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, 6466 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. 6467 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, 6468 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, 6469 // KeyedStoreStubCompiler::GenerateStoreFastElement. 6470 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, 6471 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, 6472 // ElementsTransitionGenerator::GenerateMapChangeElementTransition 6473 // and ElementsTransitionGenerator::GenerateSmiToDouble 6474 // and ElementsTransitionGenerator::GenerateDoubleToObject 6475 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, 6476 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, 6477 // ElementsTransitionGenerator::GenerateDoubleToObject 6478 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, 6479 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, 6480 // StoreArrayLiteralElementStub::Generate 6481 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, 6482 // FastNewClosureStub::Generate 6483 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, 6484 // StringAddStub::Generate 6485 { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, 6486 { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, 6487 // Null termination. 6488 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} 6489 }; 6490 6491 #undef REG 6492 6493 6494 bool RecordWriteStub::IsPregenerated() { 6495 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; 6496 !entry->object.is(no_reg); 6497 entry++) { 6498 if (object_.is(entry->object) && 6499 value_.is(entry->value) && 6500 address_.is(entry->address) && 6501 remembered_set_action_ == entry->action && 6502 save_fp_regs_mode_ == kDontSaveFPRegs) { 6503 return true; 6504 } 6505 } 6506 return false; 6507 } 6508 6509 6510 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( 6511 Isolate* isolate) { 6512 StoreBufferOverflowStub stub1(kDontSaveFPRegs); 6513 stub1.GetCode(isolate)->set_is_pregenerated(true); 6514 // Hydrogen code stubs need stub2 at snapshot time. 6515 StoreBufferOverflowStub stub2(kSaveFPRegs); 6516 stub2.GetCode(isolate)->set_is_pregenerated(true); 6517 } 6518 6519 6520 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { 6521 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; 6522 !entry->object.is(no_reg); 6523 entry++) { 6524 RecordWriteStub stub(entry->object, 6525 entry->value, 6526 entry->address, 6527 entry->action, 6528 kDontSaveFPRegs); 6529 stub.GetCode(isolate)->set_is_pregenerated(true); 6530 } 6531 } 6532 6533 6534 bool CodeStub::CanUseFPRegisters() { 6535 return true; // VFP2 is a base requirement for V8 6536 } 6537 6538 6539 // Takes the input in 3 registers: address_ value_ and object_. A pointer to 6540 // the value has just been written into the object, now this stub makes sure 6541 // we keep the GC informed. The word in the object where the value has been 6542 // written is in the address register. 6543 void RecordWriteStub::Generate(MacroAssembler* masm) { 6544 Label skip_to_incremental_noncompacting; 6545 Label skip_to_incremental_compacting; 6546 6547 // The first two instructions are generated with labels so as to get the 6548 // offset fixed up correctly by the bind(Label*) call. We patch it back and 6549 // forth between a compare instructions (a nop in this position) and the 6550 // real branch when we start and stop incremental heap marking. 
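  // A sketch of the layout the patching relies on (see PatchBranchIntoNop
  // below and RecordWriteStub::Patch for the authoritative encodings):
  //   instruction 0: b <skip_to_incremental_noncompacting>  <->  nop
  //   instruction 1: b <skip_to_incremental_compacting>     <->  nop
  // With both slots holding nops the stub only updates the store buffer;
  // patching the first or second slot back into a branch activates the
  // INCREMENTAL or INCREMENTAL_COMPACTION path respectively.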
6551 // See RecordWriteStub::Patch for details. 6552 { 6553 // Block literal pool emission, as the position of these two instructions 6554 // is assumed by the patching code. 6555 Assembler::BlockConstPoolScope block_const_pool(masm); 6556 __ b(&skip_to_incremental_noncompacting); 6557 __ b(&skip_to_incremental_compacting); 6558 } 6559 6560 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { 6561 __ RememberedSetHelper(object_, 6562 address_, 6563 value_, 6564 save_fp_regs_mode_, 6565 MacroAssembler::kReturnAtEnd); 6566 } 6567 __ Ret(); 6568 6569 __ bind(&skip_to_incremental_noncompacting); 6570 GenerateIncremental(masm, INCREMENTAL); 6571 6572 __ bind(&skip_to_incremental_compacting); 6573 GenerateIncremental(masm, INCREMENTAL_COMPACTION); 6574 6575 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. 6576 // Will be checked in IncrementalMarking::ActivateGeneratedStub. 6577 ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); 6578 ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); 6579 PatchBranchIntoNop(masm, 0); 6580 PatchBranchIntoNop(masm, Assembler::kInstrSize); 6581 } 6582 6583 6584 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { 6585 regs_.Save(masm); 6586 6587 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { 6588 Label dont_need_remembered_set; 6589 6590 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); 6591 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. 6592 regs_.scratch0(), 6593 &dont_need_remembered_set); 6594 6595 __ CheckPageFlag(regs_.object(), 6596 regs_.scratch0(), 6597 1 << MemoryChunk::SCAN_ON_SCAVENGE, 6598 ne, 6599 &dont_need_remembered_set); 6600 6601 // First notify the incremental marker if necessary, then update the 6602 // remembered set. 6603 CheckNeedsToInformIncrementalMarker( 6604 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); 6605 InformIncrementalMarker(masm, mode); 6606 regs_.Restore(masm); 6607 __ RememberedSetHelper(object_, 6608 address_, 6609 value_, 6610 save_fp_regs_mode_, 6611 MacroAssembler::kReturnAtEnd); 6612 6613 __ bind(&dont_need_remembered_set); 6614 } 6615 6616 CheckNeedsToInformIncrementalMarker( 6617 masm, kReturnOnNoNeedToInformIncrementalMarker, mode); 6618 InformIncrementalMarker(masm, mode); 6619 regs_.Restore(masm); 6620 __ Ret(); 6621 } 6622 6623 6624 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { 6625 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); 6626 int argument_count = 3; 6627 __ PrepareCallCFunction(argument_count, regs_.scratch0()); 6628 Register address = 6629 r0.is(regs_.address()) ? 
regs_.scratch0() : regs_.address(); 6630 ASSERT(!address.is(regs_.object())); 6631 ASSERT(!address.is(r0)); 6632 __ Move(address, regs_.address()); 6633 __ Move(r0, regs_.object()); 6634 __ Move(r1, address); 6635 __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); 6636 6637 AllowExternalCallThatCantCauseGC scope(masm); 6638 if (mode == INCREMENTAL_COMPACTION) { 6639 __ CallCFunction( 6640 ExternalReference::incremental_evacuation_record_write_function( 6641 masm->isolate()), 6642 argument_count); 6643 } else { 6644 ASSERT(mode == INCREMENTAL); 6645 __ CallCFunction( 6646 ExternalReference::incremental_marking_record_write_function( 6647 masm->isolate()), 6648 argument_count); 6649 } 6650 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); 6651 } 6652 6653 6654 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( 6655 MacroAssembler* masm, 6656 OnNoNeedToInformIncrementalMarker on_no_need, 6657 Mode mode) { 6658 Label on_black; 6659 Label need_incremental; 6660 Label need_incremental_pop_scratch; 6661 6662 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); 6663 __ ldr(regs_.scratch1(), 6664 MemOperand(regs_.scratch0(), 6665 MemoryChunk::kWriteBarrierCounterOffset)); 6666 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC); 6667 __ str(regs_.scratch1(), 6668 MemOperand(regs_.scratch0(), 6669 MemoryChunk::kWriteBarrierCounterOffset)); 6670 __ b(mi, &need_incremental); 6671 6672 // Let's look at the color of the object: If it is not black we don't have 6673 // to inform the incremental marker. 6674 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); 6675 6676 regs_.Restore(masm); 6677 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { 6678 __ RememberedSetHelper(object_, 6679 address_, 6680 value_, 6681 save_fp_regs_mode_, 6682 MacroAssembler::kReturnAtEnd); 6683 } else { 6684 __ Ret(); 6685 } 6686 6687 __ bind(&on_black); 6688 6689 // Get the value from the slot. 6690 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); 6691 6692 if (mode == INCREMENTAL_COMPACTION) { 6693 Label ensure_not_white; 6694 6695 __ CheckPageFlag(regs_.scratch0(), // Contains value. 6696 regs_.scratch1(), // Scratch. 6697 MemoryChunk::kEvacuationCandidateMask, 6698 eq, 6699 &ensure_not_white); 6700 6701 __ CheckPageFlag(regs_.object(), 6702 regs_.scratch1(), // Scratch. 6703 MemoryChunk::kSkipEvacuationSlotsRecordingMask, 6704 eq, 6705 &need_incremental); 6706 6707 __ bind(&ensure_not_white); 6708 } 6709 6710 // We need extra registers for this, so we push the object and the address 6711 // register temporarily. 6712 __ Push(regs_.object(), regs_.address()); 6713 __ EnsureNotWhite(regs_.scratch0(), // The value. 6714 regs_.scratch1(), // Scratch. 6715 regs_.object(), // Scratch. 6716 regs_.address(), // Scratch. 6717 &need_incremental_pop_scratch); 6718 __ Pop(regs_.object(), regs_.address()); 6719 6720 regs_.Restore(masm); 6721 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { 6722 __ RememberedSetHelper(object_, 6723 address_, 6724 value_, 6725 save_fp_regs_mode_, 6726 MacroAssembler::kReturnAtEnd); 6727 } else { 6728 __ Ret(); 6729 } 6730 6731 __ bind(&need_incremental_pop_scratch); 6732 __ Pop(regs_.object(), regs_.address()); 6733 6734 __ bind(&need_incremental); 6735 6736 // Fall through when we need to inform the incremental marker. 
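  // (The caller, GenerateIncremental, emits the InformIncrementalMarker call
  // and the code that restores registers and returns immediately after this
  // helper, so falling through here continues into that code.)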
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : element value to store
  //  -- r3    : element index as smi
  //  -- sp[0] : array literal index in function as smi
  //  -- sp[4] : array literal
  // clobbers r1, r2, r4, r5, r6
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
  __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));

  __ CheckFastElements(r2, r5, &double_elements);
  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
  __ JumpIfSmi(r0, &smi_element);
  __ CheckFastSmiElements(r2, r5, &fast_elements);

  // Store into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
  __ Push(r1, r3, r0);
  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
  __ Push(r5, r4);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(r0, MemOperand(r6, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ bind(&smi_element);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
  __ Ret();

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
  __ Ret();
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(1, fp_registers_ ?
kSaveFPRegs : kDontSaveFPRegs); 6804 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); 6805 int parameter_count_offset = 6806 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; 6807 __ ldr(r1, MemOperand(fp, parameter_count_offset)); 6808 if (function_mode_ == JS_FUNCTION_STUB_MODE) { 6809 __ add(r1, r1, Operand(1)); 6810 } 6811 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); 6812 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); 6813 __ add(sp, sp, r1); 6814 __ Ret(); 6815 } 6816 6817 6818 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { 6819 if (masm->isolate()->function_entry_hook() != NULL) { 6820 PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); 6821 AllowStubCallsScope allow_stub_calls(masm, true); 6822 ProfileEntryHookStub stub; 6823 __ push(lr); 6824 __ CallStub(&stub); 6825 __ pop(lr); 6826 } 6827 } 6828 6829 6830 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { 6831 // The entry hook is a "push lr" instruction, followed by a call. 6832 const int32_t kReturnAddressDistanceFromFunctionStart = 6833 3 * Assembler::kInstrSize; 6834 6835 // This should contain all kCallerSaved registers. 6836 const RegList kSavedRegs = 6837 1 << 0 | // r0 6838 1 << 1 | // r1 6839 1 << 2 | // r2 6840 1 << 3 | // r3 6841 1 << 5 | // r5 6842 1 << 9; // r9 6843 // We also save lr, so the count here is one higher than the mask indicates. 6844 const int32_t kNumSavedRegs = 7; 6845 6846 ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved); 6847 6848 // Save all caller-save registers as this may be called from anywhere. 6849 __ stm(db_w, sp, kSavedRegs | lr.bit()); 6850 6851 // Compute the function's address for the first argument. 6852 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart)); 6853 6854 // The caller's return address is above the saved temporaries. 6855 // Grab that for the second argument to the hook. 6856 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); 6857 6858 // Align the stack if necessary. 6859 int frame_alignment = masm->ActivationFrameAlignment(); 6860 if (frame_alignment > kPointerSize) { 6861 __ mov(r5, sp); 6862 ASSERT(IsPowerOf2(frame_alignment)); 6863 __ and_(sp, sp, Operand(-frame_alignment)); 6864 } 6865 6866 #if V8_HOST_ARCH_ARM 6867 int32_t entry_hook = 6868 reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook()); 6869 __ mov(ip, Operand(entry_hook)); 6870 #else 6871 // Under the simulator we need to indirect the entry hook through a 6872 // trampoline function at a known address. 6873 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); 6874 __ mov(ip, Operand(ExternalReference(&dispatcher, 6875 ExternalReference::BUILTIN_CALL, 6876 masm->isolate()))); 6877 #endif 6878 __ Call(ip); 6879 6880 // Restore the stack pointer if needed. 6881 if (frame_alignment > kPointerSize) { 6882 __ mov(sp, r5); 6883 } 6884 6885 // Also pop pc to get Ret(0). 6886 __ ldm(ia_w, sp, kSavedRegs | pc.bit()); 6887 } 6888 6889 6890 template<class T> 6891 static void CreateArrayDispatch(MacroAssembler* masm) { 6892 int last_index = GetSequenceIndexFromFastElementsKind( 6893 TERMINAL_FAST_ELEMENTS_KIND); 6894 for (int i = 0; i <= last_index; ++i) { 6895 Label next; 6896 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); 6897 __ cmp(r3, Operand(kind)); 6898 __ b(ne, &next); 6899 T stub(kind); 6900 __ TailCallStub(&stub); 6901 __ bind(&next); 6902 } 6903 6904 // If we reached this point there is a problem. 
6905 __ Abort(kUnexpectedElementsKindInArrayConstructor); 6906 } 6907 6908 6909 static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { 6910 // r2 - type info cell 6911 // r3 - kind 6912 // r0 - number of arguments 6913 // r1 - constructor? 6914 // sp[0] - last argument 6915 ASSERT(FAST_SMI_ELEMENTS == 0); 6916 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); 6917 ASSERT(FAST_ELEMENTS == 2); 6918 ASSERT(FAST_HOLEY_ELEMENTS == 3); 6919 ASSERT(FAST_DOUBLE_ELEMENTS == 4); 6920 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); 6921 6922 // is the low bit set? If so, we are holey and that is good. 6923 __ tst(r3, Operand(1)); 6924 Label normal_sequence; 6925 __ b(ne, &normal_sequence); 6926 6927 // look at the first argument 6928 __ ldr(r5, MemOperand(sp, 0)); 6929 __ cmp(r5, Operand::Zero()); 6930 __ b(eq, &normal_sequence); 6931 6932 // We are going to create a holey array, but our kind is non-holey. 6933 // Fix kind and retry (only if we have an allocation site in the cell). 6934 __ add(r3, r3, Operand(1)); 6935 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); 6936 __ b(eq, &normal_sequence); 6937 __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset)); 6938 __ ldr(r5, FieldMemOperand(r5, 0)); 6939 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); 6940 __ b(ne, &normal_sequence); 6941 6942 // Save the resulting elements kind in type info 6943 __ SmiTag(r3); 6944 __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset)); 6945 __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset)); 6946 __ SmiUntag(r3); 6947 6948 __ bind(&normal_sequence); 6949 int last_index = GetSequenceIndexFromFastElementsKind( 6950 TERMINAL_FAST_ELEMENTS_KIND); 6951 for (int i = 0; i <= last_index; ++i) { 6952 Label next; 6953 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); 6954 __ cmp(r3, Operand(kind)); 6955 __ b(ne, &next); 6956 ArraySingleArgumentConstructorStub stub(kind); 6957 __ TailCallStub(&stub); 6958 __ bind(&next); 6959 } 6960 6961 // If we reached this point there is a problem. 
  __ Abort(kUnexpectedElementsKindInArrayConstructor);
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(kind);
    stub.GetCode(isolate)->set_is_pregenerated(true);
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
      stub1.GetCode(isolate)->set_is_pregenerated(true);
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
    stubh1.GetCode(isolate)->set_is_pregenerated(true);
    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
    stubh2.GetCode(isolate)->set_is_pregenerated(true);
    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
    stubh3.GetCode(isolate)->set_is_pregenerated(true);
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : argc (only if argument_count_ == ANY)
  //  -- r1    : constructor
  //  -- r2    : type info cell
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r2 or a valid cell.
    // (The kExpectedPropertyCellInRegisterEbx bailout-reason name comes from
    // the shared list used by all ports.)
    Label okay_here;
    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
    __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
    __ b(eq, &okay_here);
    __ ldr(r3, FieldMemOperand(r2, 0));
    __ cmp(r3, Operand(cell_map));
    __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
    __ bind(&okay_here);
  }

  Label no_info, switch_ready;
  // Get the elements kind and dispatch on it.
  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
  __ b(eq, &no_info);
  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));

  // The type cell may have undefined in its value.
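  // If it does, or if the value is not an AllocationSite (for example a
  // JSFunction), there is no recorded elements-kind feedback and we fall
  // back to GetInitialFastElementsKind() at the no_info label below.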
  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
  __ b(eq, &no_info);

  // The type cell has either an AllocationSite or a JSFunction.
  __ ldr(r4, FieldMemOperand(r3, 0));
  __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
  __ b(ne, &no_info);

  __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r3);
  __ jmp(&switch_ready);
  __ bind(&no_info);
  __ mov(r3, Operand(GetInitialFastElementsKind()));
  __ bind(&switch_ready);

  if (argument_count_ == ANY) {
    Label not_zero_case, not_one_case;
    __ tst(r0, r0);
    __ b(ne, &not_zero_case);
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);

    __ bind(&not_zero_case);
    __ cmp(r0, Operand(1));
    __ b(gt, &not_one_case);
    CreateArrayDispatchOneArgument(masm);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
  } else if (argument_count_ == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
  } else if (argument_count_ == ONE) {
    CreateArrayDispatchOneArgument(masm);
  } else if (argument_count_ == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
  } else {
    UNREACHABLE();
  }
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  Label not_zero_case, not_one_case;
  Label normal_sequence;

  __ tst(r0, r0);
  __ b(ne, &not_zero_case);
  InternalArrayNoArgumentConstructorStub stub0(kind);
  __ TailCallStub(&stub0);

  __ bind(&not_zero_case);
  __ cmp(r0, Operand(1));
  __ b(gt, &not_one_case);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ ldr(r3, MemOperand(sp, 0));
    __ cmp(r3, Operand::Zero());
    __ b(eq, &normal_sequence);

    InternalArraySingleArgumentConstructorStub
        stub1_holey(GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey);
  }

  __ bind(&normal_sequence);
  InternalArraySingleArgumentConstructorStub stub1(kind);
  __ TailCallStub(&stub1);

  __ bind(&not_one_case);
  InternalArrayNArgumentsConstructorStub stubN(kind);
  __ TailCallStub(&stubN);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : argc
  //  -- r1    : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into r3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
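  // The Ubfx below is, in effect,
  //   r3 = (bit_field2 >> Map::kElementsKindShift) &
  //        ((1 << Map::kElementsKindBitCount) - 1);
  // i.e. an unsigned extraction of the elements-kind bits.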
7146 __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset)); 7147 // Retrieve elements_kind from bit field 2. 7148 __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount); 7149 7150 if (FLAG_debug_code) { 7151 Label done; 7152 __ cmp(r3, Operand(FAST_ELEMENTS)); 7153 __ b(eq, &done); 7154 __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS)); 7155 __ Assert(eq, 7156 kInvalidElementsKindForInternalArrayOrInternalPackedArray); 7157 __ bind(&done); 7158 } 7159 7160 Label fast_elements_case; 7161 __ cmp(r3, Operand(FAST_ELEMENTS)); 7162 __ b(eq, &fast_elements_case); 7163 GenerateCase(masm, FAST_HOLEY_ELEMENTS); 7164 7165 __ bind(&fast_elements_case); 7166 GenerateCase(masm, FAST_ELEMENTS); 7167 } 7168 7169 7170 #undef __ 7171 7172 } } // namespace v8::internal 7173 7174 #endif // V8_TARGET_ARCH_ARM 7175