// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_

#include "assembler.h"
#include "frames.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };


enum RegisterValueType {
  REGISTER_VALUE_IS_SMI,
  REGISTER_VALUE_IS_INT32
};


bool AreAliased(Register r1, Register r2, Register r3, Register r4);


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
  void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
  // These methods can only be used with constant roots (i.e. non-writable
  // and not in new space).
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support
  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  void CheckPageFlagForMap(
      Handle<Map> map,
      int mask,
      Condition cc,
      Label* condition_met,
      Label::Distance condition_met_distance = Label::kFar);

  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }

  // Check if an object has a given incremental marking color. Also uses ecx!
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                Label::Distance has_color_distance,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black,
                   Label::Distance on_black_distance = Label::kFar);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Label* object_is_white_and_not_data,
                      Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
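
  // Illustrative sketch (not part of the interface): a typical tagged-field
  // store followed by its write barrier, assuming a code generator that uses
  // the usual '#define __ ACCESS_MASM(masm)' shorthand and the standard ia32
  // register aliases:
  //
  //   __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), eax);
  //   __ RecordWriteField(ebx, JSObject::kPropertiesOffset, eax, ecx,
  //                       kDontSaveFPRegs);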

  // As above, but the offset has the tag presubtracted. For use with
  // Operand(reg, off).
  void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     save_fp,
                     remembered_set_action,
                     smi_check);
  }

  // Notify the garbage collector that we wrote a pointer into a fixed array.
  // |array| is the array being stored into, |value| is the object being
  // stored. |index| is the array index represented as a Smi. All registers
  // are clobbered by the operation. RecordWriteArray filters out smis so it
  // does not update the write barrier if the value is a smi.
  void RecordWriteArray(
      Register array,
      Register value,
      Register index,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For the page containing |object| mark the region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. The address and value registers are clobbered by the
  // operation. RecordWrite filters out smis so it does not update the
  // write barrier if the value is a smi.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For the page containing |object| mark the region covering the object's map
  // dirty. |object| is the object being stored into, |map| is the Map object
  // that was stored.
  void RecordWriteForMap(
      Register object,
      Handle<Map> map,
      Register scratch1,
      Register scratch2,
      SaveFPRegsMode save_fp);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();
#endif

  // Enter specific kind of exit frame. Expects the number of
  // arguments in register eax and sets up the number of arguments in
  // register edi and the pointer to the first argument in register
  // esi.
  void EnterExitFrame(bool save_doubles);

  void EnterApiExitFrame(int argc);

  // Leave the current exit frame. Expects the return value in
  // register eax:edx (untouched) and the pointer to the first
  // argument in register esi.
  void LeaveExitFrame(bool save_doubles);

  // Leave the current exit frame. Expects the return value in
  // register eax (untouched).
  void LeaveApiExitFrame();
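
  // Illustrative sketch (assumptions: a stub generator using the usual
  // '#define __ ACCESS_MASM(masm)' shorthand that calls out through an API
  // exit frame with two stack arguments); the enter/leave calls must pair up:
  //
  //   __ EnterApiExitFrame(2);
  //   // ... store the arguments and make the call ...
  //   __ LeaveApiExitFrame();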

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the initial map for new Arrays from a JSFunction.
  void LoadInitialArrayMap(Register function_in,
                           Register scratch,
                           Register map_out,
                           bool can_have_holes);

  void LoadGlobalContext(Register global_context);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { pushad(); }
  void PopSafepointRegisters() { popad(); }
  // Store the value in register/immediate src in the safepoint
  // register stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void StoreToSafepointRegisterSlot(Register dst, Immediate src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  void LoadHeapObject(Register result, Handle<HeapObject> object);
  void CmpHeapObject(Register reg, Handle<HeapObject> object);
  void PushHeapObject(Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      Set(result, Immediate(object));
    }
  }

  void CmpObject(Register reg, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      CmpHeapObject(reg, Handle<HeapObject>::cast(object));
    } else {
      cmp(reg, Immediate(object));
    }
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Set up call kind marking in ecx. The method takes ecx as an
  // explicit first parameter to make the code more readable at the
  // call sites.
  void SetCallKind(Register dst, CallKind kind);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind) {
    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
  }

  void InvokeCode(const Operand& code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);
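
  // Illustrative sketch (assumptions: the callee JSFunction is already in
  // edi, it is invoked with no formal arguments, and the generator uses the
  // usual '#define __ ACCESS_MASM(masm)' shorthand):
  //
  //   ParameterCount actual(0);
  //   __ InvokeFunction(edi, actual, CALL_FUNCTION,
  //                     NullCallWrapper(), CALL_AS_METHOD);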

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Expression support
  void Set(Register dst, const Immediate& x);
  void Set(const Operand& dst, const Immediate& x);

  // Support for constant splitting.
  bool IsUnsafeImmediate(const Immediate& x);
  void SafeSet(Register dst, const Immediate& x);
  void SafePush(const Immediate& x);

  // Compare object type for heap object.
  // Incoming register is heap_object and outgoing register is map.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Label* fail,
                         Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements, otherwise jump to fail.
  void StoreNumberToDoubleElements(Register maybe_number,
                                   Register elements,
                                   Register key,
                                   Register scratch1,
                                   XMMRegister scratch2,
                                   Label* fail,
                                   bool specialize_for_processor,
                                   int offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
  // the result of the map compare. If multiple map compares are required, the
  // compare sequence branches to early_success.
  void CompareMap(Register obj,
                  Handle<Map> map,
                  Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register unused,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // Check if the object in register heap_object is a name. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectNameType(Register heap_object,
                             Register map,
                             Register instance_type);

  // Check if a heap object's type is in the JSObject range, not including
  // JSFunction. The object's map will be loaded in the map register.
  // Any or all of the three registers may be the same.
  // The contents of the scratch register will always be overwritten.
  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  // The contents of the scratch register will be overwritten.
  void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
  void FCmp();

  void ClampUint8(Register reg);

  void ClampDoubleToUint8(XMMRegister input_reg,
                          XMMRegister scratch_reg,
                          Register result_reg);


  // Smi tagging support.
  void SmiTag(Register reg) {
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    add(reg, reg);
  }
  void SmiUntag(Register reg) {
    sar(reg, kSmiTagSize);
  }

  // Modifies the register even if it does not contain a Smi!
  void SmiUntag(Register reg, Label* is_smi) {
    STATIC_ASSERT(kSmiTagSize == 1);
    sar(reg, kSmiTagSize);
    STATIC_ASSERT(kSmiTag == 0);
    j(not_carry, is_smi);
  }

  void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the operand is a smi.
  inline void JumpIfSmi(Operand value,
                        Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value,
                           Label* not_smi_label,
                           Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(not_zero, not_smi_label, distance);
  }
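
  // Illustrative note on the smi encoding these helpers rely on (kSmiTag == 0,
  // kSmiTagSize == 1): the integer n is stored as n << 1, so SmiTag turns 5
  // into 10 and SmiUntag turns 10 back into 5. A typical guarded untag,
  // assuming the usual '#define __ ACCESS_MASM(masm)' shorthand:
  //
  //   Label not_smi;
  //   __ JumpIfNotSmi(eax, &not_smi);
  //   __ SmiUntag(eax);  // eax now holds the untagged integer value.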

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
    sar(reg, shift);
    and_(reg, Immediate(mask));
  }
  void LoadPowerOf2(XMMRegister dst, Register scratch, int power);

  // Abort execution if argument is not a number, enabled via --debug-code.
  void AssertNumber(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // Throw to the top handler in the try handler chain.
  void Throw(Register value);

  // Throw past all JS frames to the top JS entry frame.
  void ThrowUncatchable(Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register is clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch1,
                              Register scratch2,
                              Label* miss);

  void GetNumberHash(Register r0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register r0,
                                Register r1,
                                Register r2,
                                Register result);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old pointer space. If the given space
  // is exhausted control continues at the gc_required label. The allocated
  // object is returned in result and the end of the new object is returned in
  // result_end. The register scratch can be passed as no_reg in which case
  // an additional object reference will be added to the reloc info. The
  // returned pointers in result and result_end have not yet been tagged as
  // heap objects. If result_contains_top_on_entry is true the content of
  // result is known to be the allocation top on entry (could be result_end
  // from a previous call). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void Allocate(int object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int header_size,
                ScaleFactor element_size,
                Register element_count,
                RegisterValueType element_count_type,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);
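
  // Illustrative sketch (assumptions: the generator uses the usual
  // '#define __ ACCESS_MASM(masm)' shorthand and |gc_required| falls back to
  // a runtime allocation): allocate a fixed-size object in new space and
  // receive a tagged pointer in eax.
  //
  //   Label gc_required;
  //   __ Allocate(HeapNumber::kSize, eax, ebx, no_reg, &gc_required,
  //               TAG_OBJECT);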

  // Undo allocation in new space. The object passed and objects allocated
  // after it will no longer be allocated. Make sure that no pointers are left
  // to the object(s) no longer allocated as they would be invalid when
  // allocation is un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. The
  // register scratch2 can be passed as no_reg; the others must be
  // valid registers. Returns tagged pointer in result register, or
  // jumps to gc_required if new space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateAsciiString(Register result,
                           int length,
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteConsString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // Allocate a raw sliced string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteSlicedString(Register result,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateAsciiSlicedString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);

  // Copy memory, byte-by-byte, from source to destination. Not optimized for
  // long or aligned copies.
  // The contents of index and scratch are destroyed.
  void CopyBytes(Register source,
                 Register destination,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // not including |end_offset| are overwritten with the value in |filler|. At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check a boolean-bit of a Smi field.
  void BooleanBitTest(Register object, int field_offset, int bit_index);

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub. Generate the code if necessary.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Tail call a code stub (jump). Generate the code if necessary.
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(ExternalReference ref, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Before calling a C-function from generated code, align arguments on the
  // stack. After aligning the frame, arguments must be stored in esp[0],
  // esp[4], etc., not pushed. The argument count assumes all arguments are
  // word sized. Some compilers/platforms require the stack to be aligned when
  // calling C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_arguments, Register scratch);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
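
  // Illustrative sketch (assumptions: the generator uses the usual
  // '#define __ ACCESS_MASM(masm)' shorthand and |ref| is an
  // ExternalReference to a two-argument C function): align the stack, store
  // the arguments directly into the reserved slots, then call.
  //
  //   __ PrepareCallCFunction(2, eax);
  //   __ mov(Operand(esp, 0 * kPointerSize), ebx);
  //   __ mov(Operand(esp, 1 * kPointerSize), ecx);
  //   __ CallCFunction(ref, 2);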

  // Prepares the stack to put arguments (aligns and so on). Reserves
  // space for a return value if needed (assumes the return value is a handle).
  // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
  // etc. Saves context (esi). If space was reserved for the return value then
  // stores the pointer to the reserved slot into esi.
  void PrepareCallApiFunction(int argc, bool returns_handle);

  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Clobbers ebx, edi and
  // caller-save registers. Restores context. On return removes
  // stack_space * kPointerSize (GCed).
  void CallApiFunctionAndReturn(Address function_address,
                                Address thunk_address,
                                Operand thunk_last_arg,
                                int stack_space,
                                bool returns_handle,
                                int return_value_offset_from_ebp);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1. Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the esp register.
  void Drop(int element_count);

  void Call(Label* target) { call(target); }
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Emit a call to the code we are currently generating.
  void CallSelf() {
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    call(self, RelocInfo::CODE_TARGET);
  }

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Push a handle value.
  void Push(Handle<Object> handle) { push(Immediate(handle)); }
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }

  // Insert code to verify that the x87 stack has the specified depth (0-7).
  void VerifyX87StackDepth(uint32_t depth);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);
  void IncrementCounter(Condition cc, StatsCounter* counter, int value);
  void DecrementCounter(Condition cc, StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason reason);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // String utilities.

  // Check whether the instance type represents a flat ASCII string. Jump to
  // the label if not. If the instance type can be scratched specify the same
  // register for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
                                              Register scratch,
                                              Label* on_not_flat_ascii_string);

  // Checks if both objects are sequential ASCII strings, and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialAsciiStrings(Register object1,
                                           Register object2,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* on_not_flat_ascii_strings);

  // Checks if the given register or operand is a unique name.
  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
                           Label::Distance distance = Label::kFar) {
    JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
  }

  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
                           Label::Distance distance = Label::kFar);

  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in eax and returns map with validated enum cache
  // in eax. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, the condition code is set to equal.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg);

 private:
  bool generating_stub_;
  bool allow_stub_calls_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      const Operand& code_operand,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      Label::Distance done_distance,
                      const CallWrapper& call_wrapper = NullCallWrapper(),
                      CallKind call_kind = CALL_AS_METHOD);

  void EnterExitFramePrologue();
  void EnterExitFrameEpilogue(int argc, bool save_doubles);

  void LeaveExitFrameEpilogue();

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);

  void UpdateAllocationTopHelper(Register result_end,
                                 Register scratch,
                                 AllocationFlags flags);

  // Helper for PopHandleScope. Allowed to perform a GC and returns
  // NULL if gc_allowed.
  // Does not perform a GC if !gc_allowed, and possibly returns a failure
  // object indicating an allocation failure.
  MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
                                                    Register scratch,
                                                    bool gc_allowed);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit. Uses ecx as scratch and
  // leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions. Compute a handler address and jump to
  // it. See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion failure.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}


// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object,
                            Register index,
                            ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
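
// Illustrative note: heap object pointers carry kHeapObjectTag in their low
// bits, which is why FieldOperand subtracts the tag when forming the memory
// operand. For example, loading an object's map (assuming the usual
// '#define __ ACCESS_MASM(masm)' shorthand):
//
//   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));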

inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}


inline Operand GlobalObjectOperand() {
  return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
}


// Generates an Operand for saving parameters after PrepareCallApiFunction.
Operand ApiParameterOperand(int index, bool returns_handle);


#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) {                                               \
    byte* ia32_coverage_function =                                        \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd();                                                       \
    masm->pushad();                                                       \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
    masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY);         \
    masm->pop(eax);                                                       \
    masm->popad();                                                        \
    masm->popfd();                                                        \
  }                                                                       \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_