// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "codegen.h"
#include "heap.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) {
    // Fall back to the library function if the stub cannot be created.
    switch (type) {
      case TranscendentalCache::SIN: return &sin;
      case TranscendentalCache::COS: return &cos;
      case TranscendentalCache::TAN: return &tan;
      case TranscendentalCache::LOG: return &log;
      default: UNIMPLEMENTED();
    }
  }

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.

  __ push(ebx);
  __ push(edx);
  __ push(edi);
  // After the three pushes above, the double input starts at
  // esp[4 * kPointerSize].
  __ fld_d(Operand(esp, 4 * kPointerSize));
  __ mov(ebx, Operand(esp, 4 * kPointerSize));
  __ mov(edx, Operand(esp, 5 * kPointerSize));
  TranscendentalCacheStub::GenerateOperation(&masm, type);
  // The return value is expected to be in ST(0) of the FPU stack.
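  // In C terms, the generated stub for, say, TranscendentalCache::SIN is
  // roughly equivalent to the sketch below (illustrative only; the actual
  // x87 code emitted by GenerateOperation includes argument-range checks):
  //
  //   double generated_sin(double x) { return sin(x); }
  //
  // ebx and edx were loaded with the low and high 32-bit words of the
  // input, presumably so GenerateOperation can inspect the raw bit
  // pattern. The pops below restore the callee-saved registers without
  // disturbing ST(0).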
  __ pop(edi);
  __ pop(edx);
  __ pop(ebx);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  // If SSE2 is not available, use libc's implementation to ensure
  // consistency, since code generated by fullcodegen calls into the runtime
  // in that case.
  if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Compute the square root in xmm0 and return it in ST(0).
  {
    CpuFeatures::Scope use_sse2(SSE2);
    __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}


OS::MemCopyFunction CreateMemCopyFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &MemCopyWrapper;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // A 32-bit cdecl call passes its arguments on the stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  int stack_offset = 0;  // Update if we change the stack height.

  if (FLAG_debug_code) {
    __ cmp(Operand(esp, kSizeOffset + stack_offset),
           Immediate(OS::kMinComplexMemCopy));
    Label ok;
    __ j(greater_equal, &ok);
    __ int3();
    __ bind(&ok);
  }
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope enable(SSE2);
    __ push(edi);
    __ push(esi);
    stack_offset += 2 * kPointerSize;
    Register dst = edi;
    Register src = esi;
    Register count = ecx;
    __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
    __ mov(src, Operand(esp, stack_offset + kSourceOffset));
    __ mov(count, Operand(esp, stack_offset + kSizeOffset));

    // Copy the first 16 bytes unaligned, then round dst up to the next
    // 16-byte boundary.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));  // edx = 16 - (dst & 15)
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);

    // edi is now aligned. Check if esi is also aligned.
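    // A sketch of the adjustment above in C (illustrative, not generated
    // code): since the first 16 bytes were already copied unaligned,
    // advancing all three values by 16 - (dst & 15) lands dst on a 16-byte
    // boundary without skipping any uncopied data:
    //
    //   size_t slop = 16 - (reinterpret_cast<uintptr_t>(dst) & 15);  // 1..16
    //   dst += slop;  src += slop;  count -= slop;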
    Label unaligned_source;
    __ test(src, Immediate(0x0F));
    __ j(not_zero, &unaligned_source);
    {
      // Copy loop for aligned source and destination.
      __ mov(edx, count);
      Register loop_count = ecx;
      Register count = edx;
      __ shr(loop_count, 5);  // 32 bytes per iteration.
      {
        // Main copy loop.
        Label loop;
        __ bind(&loop);
        __ prefetch(Operand(src, 0x20), 1);
        __ movdqa(xmm0, Operand(src, 0x00));
        __ movdqa(xmm1, Operand(src, 0x10));
        __ add(src, Immediate(0x20));

        __ movdqa(Operand(dst, 0x00), xmm0);
        __ movdqa(Operand(dst, 0x10), xmm1);
        __ add(dst, Immediate(0x20));

        __ dec(loop_count);
        __ j(not_zero, &loop);
      }

      // At most 31 bytes to copy.
      Label move_less_16;
      __ test(count, Immediate(0x10));
      __ j(zero, &move_less_16);
      __ movdqa(xmm0, Operand(src, 0));
      __ add(src, Immediate(0x10));
      __ movdqa(Operand(dst, 0), xmm0);
      __ add(dst, Immediate(0x10));
      __ bind(&move_less_16);

      // At most 15 bytes to copy. Copy 16 bytes at the end of the block.
      __ and_(count, 0xF);
      __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);

      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
      __ pop(esi);
      __ pop(edi);
      __ ret(0);
    }
    __ Align(16);
    {
      // Copy loop for unaligned source and aligned destination.
      // If source is not aligned, we can't read it as efficiently.
      __ bind(&unaligned_source);
      __ mov(edx, ecx);
      Register loop_count = ecx;
      Register count = edx;
      __ shr(loop_count, 5);
      {
        // Main copy loop.
        Label loop;
        __ bind(&loop);
        __ prefetch(Operand(src, 0x20), 1);
        __ movdqu(xmm0, Operand(src, 0x00));
        __ movdqu(xmm1, Operand(src, 0x10));
        __ add(src, Immediate(0x20));

        __ movdqa(Operand(dst, 0x00), xmm0);
        __ movdqa(Operand(dst, 0x10), xmm1);
        __ add(dst, Immediate(0x20));

        __ dec(loop_count);
        __ j(not_zero, &loop);
      }

      // At most 31 bytes to copy.
      Label move_less_16;
      __ test(count, Immediate(0x10));
      __ j(zero, &move_less_16);
      __ movdqu(xmm0, Operand(src, 0));
      __ add(src, Immediate(0x10));
      __ movdqa(Operand(dst, 0), xmm0);
      __ add(dst, Immediate(0x10));
      __ bind(&move_less_16);

      // At most 15 bytes to copy. Copy 16 bytes at the end of the block.
      __ and_(count, 0x0F);
      __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);

      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
      __ pop(esi);
      __ pop(edi);
      __ ret(0);
    }

  } else {
    // SSE2 not supported. Unlikely to happen in practice.
    __ push(edi);
    __ push(esi);
    stack_offset += 2 * kPointerSize;
    __ cld();
    Register dst = edi;
    Register src = esi;
    Register count = ecx;
    __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
    __ mov(src, Operand(esp, stack_offset + kSourceOffset));
    __ mov(count, Operand(esp, stack_offset + kSizeOffset));

    // Copy the first word.
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);

    // Increment src and dst so that dst is aligned.
    __ mov(edx, dst);
    __ and_(edx, 0x03);
    __ neg(edx);
    __ add(edx, Immediate(4));  // edx = 4 - (dst & 3)
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
    // edi is now aligned, ecx holds the number of remaining bytes to copy.
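    // rep_movs() below emits `rep movsd`; in C terms it does roughly the
    // following (illustrative sketch only):
    //
    //   while (ecx-- != 0) {
    //     *(uint32_t*)edi = *(uint32_t*)esi;
    //     edi += 4;  esi += 4;
    //   }
    //
    // i.e. it copies ecx dwords from [esi] to [edi], incrementing both
    // pointers because cld above cleared the direction flag.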

    __ mov(edx, count);
    count = edx;
    __ shr(ecx, 2);  // Make word count instead of byte count.
    __ rep_movs();

    // At most 3 bytes left to copy. Copy 4 bytes at the end of the block.
    __ and_(count, 3);
    __ mov(eax, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, count, times_1, -4), eax);

    __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
    __ pop(esi);
    __ pop(edi);
    __ ret(0);
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
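  // Holes in a FixedDoubleArray are stored as a dedicated canonical NaN bit
  // pattern (see kHoleNanUpper32/kHoleNanLower32) rather than as the-hole
  // pointer, so a hole can be distinguished from every ordinary double,
  // including real NaNs produced by arithmetic.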
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ movdbl(the_hole_nan,
              Operand::StaticVariable(canonical_the_hole_nan_reference));
  }
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements.
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope fscope(SSE2);
    __ cvtsi2sd(xmm0, ebx);
    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
              xmm0);
  } else {
    __ push(ebx);
    __ fild_s(Operand(esp, 0));
    __ pop(ebx);
    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  }
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, "object found in smi-only array");
  }

  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
              the_hole_nan);
  } else {
    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  }

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
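  // On ia32 a smi is the integer value shifted left by one (tag bit 0), so
  // a smi-tagged length already carries a factor of two: scaling it by
  // times_2 below yields length * kPointerSize, and by times_4 (as in the
  // FixedDoubleArray case above) length * kDoubleSize.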
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope fscope(SSE2);
    __ movdbl(xmm0,
              FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
    __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  } else {
    __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
    __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
    __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
    __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
  }
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
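  // esi served as a scratch register above, but it doubles as the context
  // register in V8's ia32 calling convention, so it is reloaded from the
  // frame before returning.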
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, "external string expected, but not found");
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // Ascii string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
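  // The encoding lives in a single instance-type bit; in C terms the test
  // below is roughly (illustrative sketch only):
  //
  //   if (instance_type & kStringEncodingMask) goto ascii;  // one-byte
  //   /* else fall through to the two-byte load */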
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Ascii string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqAsciiString::kHeaderSize));
  __ bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32