      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include <assert.h>  // For assert
      6 #include <limits.h>  // For LONG_MIN, LONG_MAX.
      7 
      8 #if V8_TARGET_ARCH_PPC
      9 
     10 #include "src/base/bits.h"
     11 #include "src/base/division-by-constant.h"
     12 #include "src/bootstrapper.h"
     13 #include "src/codegen.h"
     14 #include "src/debug/debug.h"
     15 #include "src/register-configuration.h"
     16 #include "src/runtime/runtime.h"
     17 
     18 #include "src/ppc/macro-assembler-ppc.h"
     19 
     20 namespace v8 {
     21 namespace internal {
     22 
     23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
     24                                CodeObjectRequired create_code_object)
     25     : Assembler(arg_isolate, buffer, size),
     26       generating_stub_(false),
     27       has_frame_(false) {
     28   if (create_code_object == CodeObjectRequired::kYes) {
     29     code_object_ =
     30         Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
     31   }
     32 }
     33 
     34 
     35 void MacroAssembler::Jump(Register target) {
     36   mtctr(target);
     37   bctr();
     38 }
     39 
     40 
     41 void MacroAssembler::JumpToJSEntry(Register target) {
     42   Move(ip, target);
     43   Jump(ip);
     44 }
     45 
     46 
     47 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
     48                           Condition cond, CRegister cr) {
     49   Label skip;
     50 
     51   if (cond != al) b(NegateCondition(cond), &skip, cr);
     52 
     53   DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
     54 
     55   mov(ip, Operand(target, rmode));
     56   mtctr(ip);
     57   bctr();
     58 
     59   bind(&skip);
     60 }
     61 
     62 
     63 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
     64                           CRegister cr) {
     65   DCHECK(!RelocInfo::IsCodeTarget(rmode));
     66   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
     67 }
     68 
     69 
     70 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
     71                           Condition cond) {
     72   DCHECK(RelocInfo::IsCodeTarget(rmode));
      73   // 'code' is always generated PPC code, never THUMB code
     74   AllowDeferredHandleDereference embedding_raw_address;
     75   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
     76 }
     77 
     78 
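         // CallSize(Register) must match Call(Register) below, which emits exactly
         // two instructions: mtctr followed by bctrl.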
     79 int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
     80 
     81 
     82 void MacroAssembler::Call(Register target) {
     83   BlockTrampolinePoolScope block_trampoline_pool(this);
     84   Label start;
     85   bind(&start);
     86 
      87   // branch via the count register and set the LK bit so LR records the return point
     88   mtctr(target);
     89   bctrl();
     90 
     91   DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
     92 }
     93 
     94 
     95 void MacroAssembler::CallJSEntry(Register target) {
     96   DCHECK(target.is(ip));
     97   Call(target);
     98 }
     99 
    100 
    101 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
    102                              Condition cond) {
    103   Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
    104   return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
    105 }
    106 
    107 
    108 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
    109                                                    RelocInfo::Mode rmode,
    110                                                    Condition cond) {
    111   return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
    112 }
    113 
    114 
    115 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
    116                           Condition cond) {
    117   BlockTrampolinePoolScope block_trampoline_pool(this);
    118   DCHECK(cond == al);
    119 
    120 #ifdef DEBUG
     121   // Check the expected size before generating code, so that the size computation
     122   // and the generated code make the same assumption about constant pool
     123   // availability (e.g., whether the constant pool is full or not).
    123   int expected_size = CallSize(target, rmode, cond);
    124   Label start;
    125   bind(&start);
    126 #endif
     127   // This can likely be optimized to use bc() with a 24-bit relative offset
    128   //
    129   // RecordRelocInfo(x.rmode_, x.imm_);
    130   // bc( BA, .... offset, LKset);
    131   //
    132 
    133   mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
    134   mtctr(ip);
    135   bctrl();
    136 
    137   DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
    138 }
    139 
    140 
    141 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
    142                              TypeFeedbackId ast_id, Condition cond) {
    143   AllowDeferredHandleDereference using_raw_address;
    144   return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
    145 }
    146 
    147 
    148 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
    149                           TypeFeedbackId ast_id, Condition cond) {
    150   BlockTrampolinePoolScope block_trampoline_pool(this);
    151   DCHECK(RelocInfo::IsCodeTarget(rmode));
    152 
    153 #ifdef DEBUG
     154   // Check the expected size before generating code, so that the size computation
     155   // and the generated code make the same assumption about constant pool
     156   // availability (e.g., whether the constant pool is full or not).
    156   int expected_size = CallSize(code, rmode, ast_id, cond);
    157   Label start;
    158   bind(&start);
    159 #endif
    160 
    161   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    162     SetRecordedAstId(ast_id);
    163     rmode = RelocInfo::CODE_TARGET_WITH_ID;
    164   }
    165   AllowDeferredHandleDereference using_raw_address;
    166   Call(reinterpret_cast<Address>(code.location()), rmode, cond);
    167   DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
    168 }
    169 
    170 
    171 void MacroAssembler::Drop(int count) {
    172   if (count > 0) {
    173     Add(sp, sp, count * kPointerSize, r0);
    174   }
    175 }
    176 
    177 void MacroAssembler::Drop(Register count, Register scratch) {
    178   ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
    179   add(sp, sp, scratch);
    180 }
    181 
    182 void MacroAssembler::Call(Label* target) { b(target, SetLK); }
    183 
    184 
    185 void MacroAssembler::Push(Handle<Object> handle) {
    186   mov(r0, Operand(handle));
    187   push(r0);
    188 }
    189 
    190 
    191 void MacroAssembler::Move(Register dst, Handle<Object> value) {
    192   AllowDeferredHandleDereference smi_check;
    193   if (value->IsSmi()) {
    194     LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
    195   } else {
    196     DCHECK(value->IsHeapObject());
    197     if (isolate()->heap()->InNewSpace(*value)) {
    198       Handle<Cell> cell = isolate()->factory()->NewCell(value);
    199       mov(dst, Operand(cell));
    200       LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
    201     } else {
    202       mov(dst, Operand(value));
    203     }
    204   }
    205 }
    206 
    207 
    208 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
    209   DCHECK(cond == al);
    210   if (!dst.is(src)) {
    211     mr(dst, src);
    212   }
    213 }
    214 
    215 
    216 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
    217   if (!dst.is(src)) {
    218     fmr(dst, src);
    219   }
    220 }
    221 
    222 
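         // MultiPush stores the registers in |regs| in descending code order, so the
         // lowest-numbered register ends up at the lowest address, closest to the new
         // |location| (see SafepointRegisterStackIndex below).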
    223 void MacroAssembler::MultiPush(RegList regs, Register location) {
    224   int16_t num_to_push = NumberOfBitsSet(regs);
    225   int16_t stack_offset = num_to_push * kPointerSize;
    226 
    227   subi(location, location, Operand(stack_offset));
    228   for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    229     if ((regs & (1 << i)) != 0) {
    230       stack_offset -= kPointerSize;
    231       StoreP(ToRegister(i), MemOperand(location, stack_offset));
    232     }
    233   }
    234 }
    235 
    236 
    237 void MacroAssembler::MultiPop(RegList regs, Register location) {
    238   int16_t stack_offset = 0;
    239 
    240   for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    241     if ((regs & (1 << i)) != 0) {
    242       LoadP(ToRegister(i), MemOperand(location, stack_offset));
    243       stack_offset += kPointerSize;
    244     }
    245   }
    246   addi(location, location, Operand(stack_offset));
    247 }
    248 
    249 
    250 void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
    251   int16_t num_to_push = NumberOfBitsSet(dregs);
    252   int16_t stack_offset = num_to_push * kDoubleSize;
    253 
    254   subi(location, location, Operand(stack_offset));
    255   for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    256     if ((dregs & (1 << i)) != 0) {
    257       DoubleRegister dreg = DoubleRegister::from_code(i);
    258       stack_offset -= kDoubleSize;
    259       stfd(dreg, MemOperand(location, stack_offset));
    260     }
    261   }
    262 }
    263 
    264 
    265 void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
    266   int16_t stack_offset = 0;
    267 
    268   for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    269     if ((dregs & (1 << i)) != 0) {
    270       DoubleRegister dreg = DoubleRegister::from_code(i);
    271       lfd(dreg, MemOperand(location, stack_offset));
    272       stack_offset += kDoubleSize;
    273     }
    274   }
    275   addi(location, location, Operand(stack_offset));
    276 }
    277 
    278 
    279 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
    280                               Condition cond) {
    281   DCHECK(cond == al);
    282   LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
    283 }
    284 
    285 
    286 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
    287                                Condition cond) {
    288   DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
    289   DCHECK(cond == al);
    290   StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
    291 }
    292 
    293 
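         // InNewSpace tests the from-space/to-space bits on |object|'s page header and
         // branches to |branch| according to |cond|.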
    294 void MacroAssembler::InNewSpace(Register object, Register scratch,
    295                                 Condition cond, Label* branch) {
    296   DCHECK(cond == eq || cond == ne);
    297   const int mask =
    298       (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
    299   CheckPageFlag(object, scratch, mask, cond, branch);
    300 }
    301 
    302 
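         // RecordWriteField emits a write barrier for a store of |value| into the field
         // at |offset| within |object|; the field address is computed into |dst|.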
    303 void MacroAssembler::RecordWriteField(
    304     Register object, int offset, Register value, Register dst,
    305     LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    306     RememberedSetAction remembered_set_action, SmiCheck smi_check,
    307     PointersToHereCheck pointers_to_here_check_for_value) {
    308   // First, check if a write barrier is even needed. The tests below
    309   // catch stores of Smis.
    310   Label done;
    311 
    312   // Skip barrier if writing a smi.
    313   if (smi_check == INLINE_SMI_CHECK) {
    314     JumpIfSmi(value, &done);
    315   }
    316 
    317   // Although the object register is tagged, the offset is relative to the start
     318   // of the object, so the offset must be a multiple of kPointerSize.
    319   DCHECK(IsAligned(offset, kPointerSize));
    320 
    321   Add(dst, object, offset - kHeapObjectTag, r0);
    322   if (emit_debug_code()) {
    323     Label ok;
    324     andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    325     beq(&ok, cr0);
    326     stop("Unaligned cell in write barrier");
    327     bind(&ok);
    328   }
    329 
    330   RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
    331               OMIT_SMI_CHECK, pointers_to_here_check_for_value);
    332 
    333   bind(&done);
    334 
     335   // Clobber the input registers documented as clobbered when running with the
     336   // debug-code flag turned on, to provoke errors.
    337   if (emit_debug_code()) {
    338     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    339     mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
    340   }
    341 }
    342 
    343 
    344 // Will clobber 4 registers: object, map, dst, ip.  The
    345 // register 'object' contains a heap object pointer.
    346 void MacroAssembler::RecordWriteForMap(Register object, Register map,
    347                                        Register dst,
    348                                        LinkRegisterStatus lr_status,
    349                                        SaveFPRegsMode fp_mode) {
    350   if (emit_debug_code()) {
    351     LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    352     Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
    353     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
    354   }
    355 
    356   if (!FLAG_incremental_marking) {
    357     return;
    358   }
    359 
    360   if (emit_debug_code()) {
    361     LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    362     cmp(ip, map);
    363     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
    364   }
    365 
    366   Label done;
    367 
     368   // A single check of the map page's interesting flag suffices, since that flag
     369   // is only set during incremental collection, in which case the from object's
     370   // page's interesting flag is guaranteed to be set as well.  This optimization
    371   // relies on the fact that maps can never be in new space.
    372   CheckPageFlag(map,
    373                 map,  // Used as scratch.
    374                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
    375 
    376   addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
    377   if (emit_debug_code()) {
    378     Label ok;
    379     andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    380     beq(&ok, cr0);
    381     stop("Unaligned cell in write barrier");
    382     bind(&ok);
    383   }
    384 
    385   // Record the actual write.
    386   if (lr_status == kLRHasNotBeenSaved) {
    387     mflr(r0);
    388     push(r0);
    389   }
    390   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
    391                        fp_mode);
    392   CallStub(&stub);
    393   if (lr_status == kLRHasNotBeenSaved) {
    394     pop(r0);
    395     mtlr(r0);
    396   }
    397 
    398   bind(&done);
    399 
    400   // Count number of write barriers in generated code.
    401   isolate()->counters()->write_barriers_static()->Increment();
    402   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
    403 
     404   // Clobber the registers documented as clobbered when running with the
     405   // debug-code flag turned on, to provoke errors.
    406   if (emit_debug_code()) {
    407     mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    408     mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
    409   }
    410 }
    411 
    412 
    413 // Will clobber 4 registers: object, address, scratch, ip.  The
    414 // register 'object' contains a heap object pointer.  The heap object
    415 // tag is shifted away.
    416 void MacroAssembler::RecordWrite(
    417     Register object, Register address, Register value,
    418     LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
    419     RememberedSetAction remembered_set_action, SmiCheck smi_check,
    420     PointersToHereCheck pointers_to_here_check_for_value) {
    421   DCHECK(!object.is(value));
    422   if (emit_debug_code()) {
    423     LoadP(r0, MemOperand(address));
    424     cmp(r0, value);
    425     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
    426   }
    427 
    428   if (remembered_set_action == OMIT_REMEMBERED_SET &&
    429       !FLAG_incremental_marking) {
    430     return;
    431   }
    432 
    433   // First, check if a write barrier is even needed. The tests below
    434   // catch stores of smis and stores into the young generation.
    435   Label done;
    436 
    437   if (smi_check == INLINE_SMI_CHECK) {
    438     JumpIfSmi(value, &done);
    439   }
    440 
    441   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    442     CheckPageFlag(value,
    443                   value,  // Used as scratch.
    444                   MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
    445   }
    446   CheckPageFlag(object,
    447                 value,  // Used as scratch.
    448                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
    449 
    450   // Record the actual write.
    451   if (lr_status == kLRHasNotBeenSaved) {
    452     mflr(r0);
    453     push(r0);
    454   }
    455   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
    456                        fp_mode);
    457   CallStub(&stub);
    458   if (lr_status == kLRHasNotBeenSaved) {
    459     pop(r0);
    460     mtlr(r0);
    461   }
    462 
    463   bind(&done);
    464 
    465   // Count number of write barriers in generated code.
    466   isolate()->counters()->write_barriers_static()->Increment();
    467   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
    468                    value);
    469 
     470   // Clobber the registers documented as clobbered when running with the
     471   // debug-code flag turned on, to provoke errors.
    472   if (emit_debug_code()) {
    473     mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    474     mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
    475   }
    476 }
    477 
    478 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
    479                                                Register code_entry,
    480                                                Register scratch) {
    481   const int offset = JSFunction::kCodeEntryOffset;
    482 
    483   // Since a code entry (value) is always in old space, we don't need to update
     484   // the remembered set. If incremental marking is off, there is nothing for us
     485   // to do.
    486   if (!FLAG_incremental_marking) return;
    487 
    488   DCHECK(js_function.is(r4));
    489   DCHECK(code_entry.is(r7));
    490   DCHECK(scratch.is(r8));
    491   AssertNotSmi(js_function);
    492 
    493   if (emit_debug_code()) {
    494     addi(scratch, js_function, Operand(offset - kHeapObjectTag));
    495     LoadP(ip, MemOperand(scratch));
    496     cmp(ip, code_entry);
    497     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
    498   }
    499 
    500   // First, check if a write barrier is even needed. The tests below
     501   // catch stores of Smis and stores into the young generation.
    502   Label done;
    503 
    504   CheckPageFlag(code_entry, scratch,
    505                 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
    506   CheckPageFlag(js_function, scratch,
    507                 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
    508 
    509   const Register dst = scratch;
    510   addi(dst, js_function, Operand(offset - kHeapObjectTag));
    511 
    512   // Save caller-saved registers.  js_function and code_entry are in the
    513   // caller-saved register list.
    514   DCHECK(kJSCallerSaved & js_function.bit());
    515   DCHECK(kJSCallerSaved & code_entry.bit());
    516   mflr(r0);
    517   MultiPush(kJSCallerSaved | r0.bit());
    518 
    519   int argument_count = 3;
    520   PrepareCallCFunction(argument_count, code_entry);
    521 
    522   mr(r3, js_function);
    523   mr(r4, dst);
    524   mov(r5, Operand(ExternalReference::isolate_address(isolate())));
    525 
    526   {
    527     AllowExternalCallThatCantCauseGC scope(this);
    528     CallCFunction(
    529         ExternalReference::incremental_marking_record_write_code_entry_function(
    530             isolate()),
    531         argument_count);
    532   }
    533 
    534   // Restore caller-saved registers (including js_function and code_entry).
    535   MultiPop(kJSCallerSaved | r0.bit());
    536   mtlr(r0);
    537 
    538   bind(&done);
    539 }
    540 
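         // RememberedSetHelper appends |address| to the store buffer and calls the
         // StoreBufferOverflowStub when the buffer is full; depending on |and_then| it
         // then either falls through or returns.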
    541 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
    542                                          Register address, Register scratch,
    543                                          SaveFPRegsMode fp_mode,
    544                                          RememberedSetFinalAction and_then) {
    545   Label done;
    546   if (emit_debug_code()) {
    547     Label ok;
    548     JumpIfNotInNewSpace(object, scratch, &ok);
    549     stop("Remembered set pointer is in new space");
    550     bind(&ok);
    551   }
    552   // Load store buffer top.
    553   ExternalReference store_buffer =
    554       ExternalReference::store_buffer_top(isolate());
    555   mov(ip, Operand(store_buffer));
    556   LoadP(scratch, MemOperand(ip));
    557   // Store pointer to buffer and increment buffer top.
    558   StoreP(address, MemOperand(scratch));
    559   addi(scratch, scratch, Operand(kPointerSize));
    560   // Write back new top of buffer.
    561   StoreP(scratch, MemOperand(ip));
     562   // Check whether the end of the buffer has been reached; the overflow stub is
     563   // called below if so.
    564   TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);
    565 
    566   if (and_then == kFallThroughAtEnd) {
    567     bne(&done, cr0);
    568   } else {
    569     DCHECK(and_then == kReturnAtEnd);
    570     Ret(ne, cr0);
    571   }
    572   mflr(r0);
    573   push(r0);
    574   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
    575   CallStub(&store_buffer_overflow);
    576   pop(r0);
    577   mtlr(r0);
    578   bind(&done);
    579   if (and_then == kReturnAtEnd) {
    580     Ret();
    581   }
    582 }
    583 
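         // PushCommonFrame saves lr and fp (plus the constant pool pointer and an
         // optional marker when present) and leaves fp pointing at the slot that holds
         // the caller's fp.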
    584 void MacroAssembler::PushCommonFrame(Register marker_reg) {
    585   int fp_delta = 0;
    586   mflr(r0);
    587   if (FLAG_enable_embedded_constant_pool) {
    588     if (marker_reg.is_valid()) {
    589       Push(r0, fp, kConstantPoolRegister, marker_reg);
    590       fp_delta = 2;
    591     } else {
    592       Push(r0, fp, kConstantPoolRegister);
    593       fp_delta = 1;
    594     }
    595   } else {
    596     if (marker_reg.is_valid()) {
    597       Push(r0, fp, marker_reg);
    598       fp_delta = 1;
    599     } else {
    600       Push(r0, fp);
    601       fp_delta = 0;
    602     }
    603   }
    604   addi(fp, sp, Operand(fp_delta * kPointerSize));
    605 }
    606 
    607 void MacroAssembler::PopCommonFrame(Register marker_reg) {
    608   if (FLAG_enable_embedded_constant_pool) {
    609     if (marker_reg.is_valid()) {
    610       Pop(r0, fp, kConstantPoolRegister, marker_reg);
    611     } else {
    612       Pop(r0, fp, kConstantPoolRegister);
    613     }
    614   } else {
    615     if (marker_reg.is_valid()) {
    616       Pop(r0, fp, marker_reg);
    617     } else {
    618       Pop(r0, fp);
    619     }
    620   }
    621   mtlr(r0);
    622 }
    623 
    624 void MacroAssembler::PushStandardFrame(Register function_reg) {
    625   int fp_delta = 0;
    626   mflr(r0);
    627   if (FLAG_enable_embedded_constant_pool) {
    628     if (function_reg.is_valid()) {
    629       Push(r0, fp, kConstantPoolRegister, cp, function_reg);
    630       fp_delta = 3;
    631     } else {
    632       Push(r0, fp, kConstantPoolRegister, cp);
    633       fp_delta = 2;
    634     }
    635   } else {
    636     if (function_reg.is_valid()) {
    637       Push(r0, fp, cp, function_reg);
    638       fp_delta = 2;
    639     } else {
    640       Push(r0, fp, cp);
    641       fp_delta = 1;
    642     }
    643   }
    644   addi(fp, sp, Operand(fp_delta * kPointerSize));
    645 }
    646 
    647 void MacroAssembler::RestoreFrameStateForTailCall() {
    648   if (FLAG_enable_embedded_constant_pool) {
    649     LoadP(kConstantPoolRegister,
    650           MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    651     set_constant_pool_available(false);
    652   }
    653   LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    654   LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    655   mtlr(r0);
    656 }
    657 
    658 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
    659 const int MacroAssembler::kNumSafepointSavedRegisters =
    660     Register::kNumAllocatable;
    661 
    662 // Push and pop all registers that can hold pointers.
    663 void MacroAssembler::PushSafepointRegisters() {
    664   // Safepoints expect a block of kNumSafepointRegisters values on the
    665   // stack, so adjust the stack for unsaved registers.
    666   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
    667   DCHECK(num_unsaved >= 0);
    668   if (num_unsaved > 0) {
    669     subi(sp, sp, Operand(num_unsaved * kPointerSize));
    670   }
    671   MultiPush(kSafepointSavedRegisters);
    672 }
    673 
    674 
    675 void MacroAssembler::PopSafepointRegisters() {
    676   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
    677   MultiPop(kSafepointSavedRegisters);
    678   if (num_unsaved > 0) {
    679     addi(sp, sp, Operand(num_unsaved * kPointerSize));
    680   }
    681 }
    682 
    683 
    684 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
    685   StoreP(src, SafepointRegisterSlot(dst));
    686 }
    687 
    688 
    689 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
    690   LoadP(dst, SafepointRegisterSlot(src));
    691 }
    692 
    693 
    694 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
    695   // The registers are pushed starting with the highest encoding,
    696   // which means that lowest encodings are closest to the stack pointer.
    697   RegList regs = kSafepointSavedRegisters;
    698   int index = 0;
    699 
    700   DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
    701 
    702   for (int16_t i = 0; i < reg_code; i++) {
    703     if ((regs & (1 << i)) != 0) {
    704       index++;
    705     }
    706   }
    707 
    708   return index;
    709 }
    710 
    711 
    712 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
    713   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
    714 }
    715 
    716 
    717 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
    718   // General purpose registers are pushed last on the stack.
    719   const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
    720   int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
    721   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
    722   return MemOperand(sp, doubles_size + register_offset);
    723 }
    724 
    725 
    726 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
    727                                      const DoubleRegister src) {
    728   // Turn potential sNaN into qNaN.
    729   fsub(dst, src, kDoubleRegZero);
    730 }
    731 
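         // The conversions below move the integer bit pattern into an FPR and then use
         // the fcfid family of instructions to convert it to a double or single
         // precision value.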
    732 void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
    733   MovIntToDouble(dst, src, r0);
    734   fcfid(dst, dst);
    735 }
    736 
    737 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
    738                                                 DoubleRegister dst) {
    739   MovUnsignedIntToDouble(dst, src, r0);
    740   fcfid(dst, dst);
    741 }
    742 
    743 void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
    744   MovIntToDouble(dst, src, r0);
    745   fcfids(dst, dst);
    746 }
    747 
    748 void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
    749                                                DoubleRegister dst) {
    750   MovUnsignedIntToDouble(dst, src, r0);
    751   fcfids(dst, dst);
    752 }
    753 
    754 #if V8_TARGET_ARCH_PPC64
    755 void MacroAssembler::ConvertInt64ToDouble(Register src,
    756                                           DoubleRegister double_dst) {
    757   MovInt64ToDouble(double_dst, src);
    758   fcfid(double_dst, double_dst);
    759 }
    760 
    761 
    762 void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
    763                                                  DoubleRegister double_dst) {
    764   MovInt64ToDouble(double_dst, src);
    765   fcfidus(double_dst, double_dst);
    766 }
    767 
    768 
    769 void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
    770                                                   DoubleRegister double_dst) {
    771   MovInt64ToDouble(double_dst, src);
    772   fcfidu(double_dst, double_dst);
    773 }
    774 
    775 
    776 void MacroAssembler::ConvertInt64ToFloat(Register src,
    777                                          DoubleRegister double_dst) {
    778   MovInt64ToDouble(double_dst, src);
    779   fcfids(double_dst, double_dst);
    780 }
    781 #endif
    782 
    783 
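         // ConvertDoubleToInt64 rounds |double_input| to a 64-bit integer with fctid
         // (or fctidz for round-to-zero) and then moves the raw result bits into the
         // GPR destination(s).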
    784 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
    785 #if !V8_TARGET_ARCH_PPC64
    786                                           const Register dst_hi,
    787 #endif
    788                                           const Register dst,
    789                                           const DoubleRegister double_dst,
    790                                           FPRoundingMode rounding_mode) {
    791   if (rounding_mode == kRoundToZero) {
    792     fctidz(double_dst, double_input);
    793   } else {
    794     SetRoundingMode(rounding_mode);
    795     fctid(double_dst, double_input);
    796     ResetRoundingMode();
    797   }
    798 
    799   MovDoubleToInt64(
    800 #if !V8_TARGET_ARCH_PPC64
    801       dst_hi,
    802 #endif
    803       dst, double_dst);
    804 }
    805 
    806 #if V8_TARGET_ARCH_PPC64
    807 void MacroAssembler::ConvertDoubleToUnsignedInt64(
    808     const DoubleRegister double_input, const Register dst,
    809     const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
    810   if (rounding_mode == kRoundToZero) {
    811     fctiduz(double_dst, double_input);
    812   } else {
    813     SetRoundingMode(rounding_mode);
    814     fctidu(double_dst, double_input);
    815     ResetRoundingMode();
    816   }
    817 
    818   MovDoubleToInt64(dst, double_dst);
    819 }
    820 #endif
    821 
    822 #if !V8_TARGET_ARCH_PPC64
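         // On 32-bit PPC a 64-bit value is held in a {high, low} register pair; the
         // *Pair helpers below shift such a pair by a variable or immediate amount in
         // the range [0, 63].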
    823 void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
    824                                    Register src_low, Register src_high,
    825                                    Register scratch, Register shift) {
    826   DCHECK(!AreAliased(dst_low, src_high, shift));
    827   DCHECK(!AreAliased(dst_high, src_low, shift));
    828   Label less_than_32;
    829   Label done;
    830   cmpi(shift, Operand(32));
    831   blt(&less_than_32);
    832   // If shift >= 32
    833   andi(scratch, shift, Operand(0x1f));
    834   slw(dst_high, src_low, scratch);
    835   li(dst_low, Operand::Zero());
    836   b(&done);
    837   bind(&less_than_32);
    838   // If shift < 32
    839   subfic(scratch, shift, Operand(32));
    840   slw(dst_high, src_high, shift);
    841   srw(scratch, src_low, scratch);
    842   orx(dst_high, dst_high, scratch);
    843   slw(dst_low, src_low, shift);
    844   bind(&done);
    845 }
    846 
    847 void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
    848                                    Register src_low, Register src_high,
    849                                    uint32_t shift) {
    850   DCHECK(!AreAliased(dst_low, src_high));
    851   DCHECK(!AreAliased(dst_high, src_low));
    852   if (shift == 32) {
    853     Move(dst_high, src_low);
    854     li(dst_low, Operand::Zero());
    855   } else if (shift > 32) {
    856     shift &= 0x1f;
    857     slwi(dst_high, src_low, Operand(shift));
    858     li(dst_low, Operand::Zero());
    859   } else if (shift == 0) {
    860     Move(dst_low, src_low);
    861     Move(dst_high, src_high);
    862   } else {
    863     slwi(dst_high, src_high, Operand(shift));
    864     rlwimi(dst_high, src_low, shift, 32 - shift, 31);
    865     slwi(dst_low, src_low, Operand(shift));
    866   }
    867 }
    868 
    869 void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
    870                                     Register src_low, Register src_high,
    871                                     Register scratch, Register shift) {
    872   DCHECK(!AreAliased(dst_low, src_high, shift));
    873   DCHECK(!AreAliased(dst_high, src_low, shift));
    874   Label less_than_32;
    875   Label done;
    876   cmpi(shift, Operand(32));
    877   blt(&less_than_32);
    878   // If shift >= 32
    879   andi(scratch, shift, Operand(0x1f));
    880   srw(dst_low, src_high, scratch);
    881   li(dst_high, Operand::Zero());
    882   b(&done);
    883   bind(&less_than_32);
    884   // If shift < 32
    885   subfic(scratch, shift, Operand(32));
    886   srw(dst_low, src_low, shift);
    887   slw(scratch, src_high, scratch);
    888   orx(dst_low, dst_low, scratch);
    889   srw(dst_high, src_high, shift);
    890   bind(&done);
    891 }
    892 
    893 void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
    894                                     Register src_low, Register src_high,
    895                                     uint32_t shift) {
    896   DCHECK(!AreAliased(dst_low, src_high));
    897   DCHECK(!AreAliased(dst_high, src_low));
    898   if (shift == 32) {
    899     Move(dst_low, src_high);
    900     li(dst_high, Operand::Zero());
    901   } else if (shift > 32) {
    902     shift &= 0x1f;
    903     srwi(dst_low, src_high, Operand(shift));
    904     li(dst_high, Operand::Zero());
    905   } else if (shift == 0) {
    906     Move(dst_low, src_low);
    907     Move(dst_high, src_high);
    908   } else {
    909     srwi(dst_low, src_low, Operand(shift));
    910     rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    911     srwi(dst_high, src_high, Operand(shift));
    912   }
    913 }
    914 
    915 void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
    916                                        Register src_low, Register src_high,
    917                                        Register scratch, Register shift) {
    918   DCHECK(!AreAliased(dst_low, src_high, shift));
    919   DCHECK(!AreAliased(dst_high, src_low, shift));
    920   Label less_than_32;
    921   Label done;
    922   cmpi(shift, Operand(32));
    923   blt(&less_than_32);
    924   // If shift >= 32
    925   andi(scratch, shift, Operand(0x1f));
    926   sraw(dst_low, src_high, scratch);
    927   srawi(dst_high, src_high, 31);
    928   b(&done);
    929   bind(&less_than_32);
    930   // If shift < 32
    931   subfic(scratch, shift, Operand(32));
    932   srw(dst_low, src_low, shift);
    933   slw(scratch, src_high, scratch);
    934   orx(dst_low, dst_low, scratch);
    935   sraw(dst_high, src_high, shift);
    936   bind(&done);
    937 }
    938 
    939 void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
    940                                        Register src_low, Register src_high,
    941                                        uint32_t shift) {
    942   DCHECK(!AreAliased(dst_low, src_high));
    943   DCHECK(!AreAliased(dst_high, src_low));
    944   if (shift == 32) {
    945     Move(dst_low, src_high);
    946     srawi(dst_high, src_high, 31);
    947   } else if (shift > 32) {
    948     shift &= 0x1f;
    949     srawi(dst_low, src_high, shift);
    950     srawi(dst_high, src_high, 31);
    951   } else if (shift == 0) {
    952     Move(dst_low, src_low);
    953     Move(dst_high, src_high);
    954   } else {
    955     srwi(dst_low, src_low, Operand(shift));
    956     rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    957     srawi(dst_high, src_high, shift);
    958   }
    959 }
    960 #endif
    961 
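         // Loads the constant pool offset stored in the Code object header and adds it
         // to the code start address in |code_target_address| to form the constant
         // pool pointer.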
    962 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    963     Register code_target_address) {
    964   lwz(kConstantPoolRegister,
    965       MemOperand(code_target_address,
    966                  Code::kConstantPoolOffset - Code::kHeaderSize));
    967   add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
    968 }
    969 
    970 
    971 void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
    972                                                      int code_start_delta) {
    973   add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
    974                    code_start_delta);
    975 }
    976 
    977 
    978 void MacroAssembler::LoadConstantPoolPointerRegister() {
    979   mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
    980 }
    981 
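         // StubPrologue pushes a common frame with the frame-type marker and, when
         // embedded constant pools are enabled, initializes the constant pool pointer
         // register.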
    982 void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
    983                                   int prologue_offset) {
    984   {
    985     ConstantPoolUnavailableScope constant_pool_unavailable(this);
    986     LoadSmiLiteral(r11, Smi::FromInt(type));
    987     PushCommonFrame(r11);
    988   }
    989   if (FLAG_enable_embedded_constant_pool) {
    990     if (!base.is(no_reg)) {
    991       // base contains prologue address
    992       LoadConstantPoolPointerRegister(base, -prologue_offset);
    993     } else {
    994       LoadConstantPoolPointerRegister();
    995     }
    996     set_constant_pool_available(true);
    997   }
    998 }
    999 
   1000 
   1001 void MacroAssembler::Prologue(bool code_pre_aging, Register base,
   1002                               int prologue_offset) {
   1003   DCHECK(!base.is(no_reg));
   1004   {
    1005     PredictableCodeSizeScope predictable_code_size_scope(
   1006         this, kNoCodeAgeSequenceLength);
   1007     Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
   1008     // The following instructions must remain together and unmodified
   1009     // for code aging to work properly.
   1010     if (code_pre_aging) {
   1011       // Pre-age the code.
   1012       // This matches the code found in PatchPlatformCodeAge()
   1013       Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
   1014       intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
   1015       // Don't use Call -- we need to preserve ip and lr
   1016       nop();  // marker to detect sequence (see IsOld)
   1017       mov(r3, Operand(target));
   1018       Jump(r3);
   1019       for (int i = 0; i < kCodeAgingSequenceNops; i++) {
   1020         nop();
   1021       }
   1022     } else {
   1023       // This matches the code found in GetNoCodeAgeSequence()
   1024       PushStandardFrame(r4);
   1025       for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
   1026         nop();
   1027       }
   1028     }
   1029   }
   1030   if (FLAG_enable_embedded_constant_pool) {
   1031     // base contains prologue address
   1032     LoadConstantPoolPointerRegister(base, -prologue_offset);
   1033     set_constant_pool_available(true);
   1034   }
   1035 }
   1036 
   1037 
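         // Loads the current function's type feedback vector: function -> literals
         // array -> feedback vector.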
   1038 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   1039   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   1040   LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
   1041   LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
   1042 }
   1043 
   1044 
   1045 void MacroAssembler::EnterFrame(StackFrame::Type type,
   1046                                 bool load_constant_pool_pointer_reg) {
   1047   if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
   1048     // Push type explicitly so we can leverage the constant pool.
   1049     // This path cannot rely on ip containing code entry.
   1050     PushCommonFrame();
   1051     LoadConstantPoolPointerRegister();
   1052     LoadSmiLiteral(ip, Smi::FromInt(type));
   1053     push(ip);
   1054   } else {
   1055     LoadSmiLiteral(ip, Smi::FromInt(type));
   1056     PushCommonFrame(ip);
   1057   }
   1058   if (type == StackFrame::INTERNAL) {
   1059     mov(r0, Operand(CodeObject()));
   1060     push(r0);
   1061   }
   1062 }
   1063 
   1064 
   1065 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
   1066   ConstantPoolUnavailableScope constant_pool_unavailable(this);
   1067   // r3: preserved
   1068   // r4: preserved
   1069   // r5: preserved
   1070 
   1071   // Drop the execution stack down to the frame pointer and restore
   1072   // the caller's state.
   1073   int frame_ends;
   1074   LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
   1075   LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1076   if (FLAG_enable_embedded_constant_pool) {
   1077     LoadP(kConstantPoolRegister,
   1078           MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
   1079   }
   1080   mtlr(r0);
   1081   frame_ends = pc_offset();
   1082   Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
   1083   mr(fp, ip);
   1084   return frame_ends;
   1085 }
   1086 
   1087 
    1088 // ExitFrame layout (possibly out of date; needs updating)
   1089 //
   1090 //  SP -> previousSP
   1091 //        LK reserved
   1092 //        code
   1093 //        sp_on_exit (for debug?)
   1094 // oldSP->prev SP
   1095 //        LK
   1096 //        <parameters on stack>
   1097 
    1098 // Prior to calling EnterExitFrame, we have a number of parameters
    1099 // on the stack that we need to wrap a real frame around, so first
    1100 // we reserve a slot for LK and push the previous SP, which is captured
    1101 // in the fp register (r31).
    1102 // Then we buy a new frame.
   1103 
   1104 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   1105   // Set up the frame structure on the stack.
   1106   DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
   1107   DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
   1108   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   1109   DCHECK(stack_space > 0);
   1110 
   1111   // This is an opportunity to build a frame to wrap
   1112   // all of the pushes that have happened inside of V8
   1113   // since we were called from C code
   1114 
   1115   LoadSmiLiteral(ip, Smi::FromInt(StackFrame::EXIT));
   1116   PushCommonFrame(ip);
   1117   // Reserve room for saved entry sp and code object.
   1118   subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
   1119 
   1120   if (emit_debug_code()) {
   1121     li(r8, Operand::Zero());
   1122     StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
   1123   }
   1124   if (FLAG_enable_embedded_constant_pool) {
   1125     StoreP(kConstantPoolRegister,
   1126            MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
   1127   }
   1128   mov(r8, Operand(CodeObject()));
   1129   StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
   1130 
   1131   // Save the frame pointer and the context in top.
   1132   mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1133   StoreP(fp, MemOperand(r8));
   1134   mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   1135   StoreP(cp, MemOperand(r8));
   1136 
   1137   // Optionally save all volatile double registers.
   1138   if (save_doubles) {
   1139     MultiPushDoubles(kCallerSavedDoubles);
   1140     // Note that d0 will be accessible at
   1141     //   fp - ExitFrameConstants::kFrameSize -
   1142     //   kNumCallerSavedDoubles * kDoubleSize,
   1143     // since the sp slot and code slot were pushed after the fp.
   1144   }
   1145 
   1146   addi(sp, sp, Operand(-stack_space * kPointerSize));
   1147 
   1148   // Allocate and align the frame preparing for calling the runtime
   1149   // function.
   1150   const int frame_alignment = ActivationFrameAlignment();
   1151   if (frame_alignment > kPointerSize) {
   1152     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   1153     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
   1154   }
   1155   li(r0, Operand::Zero());
   1156   StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
   1157 
   1158   // Set the exit frame sp value to point just before the return address
   1159   // location.
   1160   addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
   1161   StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
   1162 }
   1163 
   1164 
   1165 void MacroAssembler::InitializeNewString(Register string, Register length,
   1166                                          Heap::RootListIndex map_index,
   1167                                          Register scratch1, Register scratch2) {
   1168   SmiTag(scratch1, length);
   1169   LoadRoot(scratch2, map_index);
   1170   StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
   1171   li(scratch1, Operand(String::kEmptyHashField));
   1172   StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
   1173   StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
   1174 }
   1175 
   1176 
   1177 int MacroAssembler::ActivationFrameAlignment() {
   1178 #if !defined(USE_SIMULATOR)
   1179   // Running on the real platform. Use the alignment as mandated by the local
   1180   // environment.
   1181   // Note: This will break if we ever start generating snapshots on one PPC
   1182   // platform for another PPC platform with a different alignment.
   1183   return base::OS::ActivationFrameAlignment();
   1184 #else  // Simulated
   1185   // If we are using the simulator then we should always align to the expected
   1186   // alignment. As the simulator is used to generate snapshots we do not know
   1187   // if the target platform will need alignment, so this is controlled from a
   1188   // flag.
   1189   return FLAG_sim_stack_alignment;
   1190 #endif
   1191 }
   1192 
   1193 
   1194 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
   1195                                     bool restore_context,
   1196                                     bool argument_count_is_length) {
   1197   ConstantPoolUnavailableScope constant_pool_unavailable(this);
   1198   // Optionally restore all double registers.
   1199   if (save_doubles) {
   1200     // Calculate the stack location of the saved doubles and restore them.
   1201     const int kNumRegs = kNumCallerSavedDoubles;
   1202     const int offset =
   1203         (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
   1204     addi(r6, fp, Operand(-offset));
   1205     MultiPopDoubles(kCallerSavedDoubles, r6);
   1206   }
   1207 
   1208   // Clear top frame.
   1209   li(r6, Operand::Zero());
   1210   mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1211   StoreP(r6, MemOperand(ip));
   1212 
   1213   // Restore current context from top and clear it in debug mode.
   1214   if (restore_context) {
   1215     mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   1216     LoadP(cp, MemOperand(ip));
   1217   }
   1218 #ifdef DEBUG
   1219   mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   1220   StoreP(r6, MemOperand(ip));
   1221 #endif
   1222 
   1223   // Tear down the exit frame, pop the arguments, and return.
   1224   LeaveFrame(StackFrame::EXIT);
   1225 
   1226   if (argument_count.is_valid()) {
   1227     if (!argument_count_is_length) {
   1228       ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
   1229     }
   1230     add(sp, sp, argument_count);
   1231   }
   1232 }
   1233 
   1234 
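         // In the PPC calling convention, d1 holds both the floating-point return
         // value and the first floating-point parameter, so both helpers below read d1.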
   1235 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
   1236   Move(dst, d1);
   1237 }
   1238 
   1239 
   1240 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
   1241   Move(dst, d1);
   1242 }
   1243 
   1244 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
   1245                                         Register caller_args_count_reg,
   1246                                         Register scratch0, Register scratch1) {
   1247 #if DEBUG
   1248   if (callee_args_count.is_reg()) {
   1249     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
   1250                        scratch1));
   1251   } else {
   1252     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
   1253   }
   1254 #endif
   1255 
    1256   // Calculate the end of the destination area where we will put the arguments
    1257   // after we drop the current frame. We add kPointerSize to count the receiver
    1258   // argument, which is not included in the formal parameter count.
   1259   Register dst_reg = scratch0;
   1260   ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
   1261   add(dst_reg, fp, dst_reg);
   1262   addi(dst_reg, dst_reg,
   1263        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
   1264 
   1265   Register src_reg = caller_args_count_reg;
   1266   // Calculate the end of source area. +kPointerSize is for the receiver.
   1267   if (callee_args_count.is_reg()) {
   1268     ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
   1269     add(src_reg, sp, src_reg);
   1270     addi(src_reg, src_reg, Operand(kPointerSize));
   1271   } else {
   1272     Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
   1273   }
   1274 
   1275   if (FLAG_debug_code) {
   1276     cmpl(src_reg, dst_reg);
   1277     Check(lt, kStackAccessBelowStackPointer);
   1278   }
   1279 
   1280   // Restore caller's frame pointer and return address now as they will be
   1281   // overwritten by the copying loop.
   1282   RestoreFrameStateForTailCall();
   1283 
   1284   // Now copy callee arguments to the caller frame going backwards to avoid
   1285   // callee arguments corruption (source and destination areas could overlap).
   1286 
   1287   // Both src_reg and dst_reg are pointing to the word after the one to copy,
   1288   // so they must be pre-decremented in the loop.
   1289   Register tmp_reg = scratch1;
   1290   Label loop;
   1291   if (callee_args_count.is_reg()) {
   1292     addi(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
   1293   } else {
   1294     mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
   1295   }
   1296   mtctr(tmp_reg);
   1297   bind(&loop);
   1298   LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
   1299   StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
   1300   bdnz(&loop);
   1301 
   1302   // Leave current frame.
   1303   mr(sp, dst_reg);
   1304 }
   1305 
   1306 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
   1307                                     const ParameterCount& actual, Label* done,
   1308                                     bool* definitely_mismatches,
   1309                                     InvokeFlag flag,
   1310                                     const CallWrapper& call_wrapper) {
   1311   bool definitely_matches = false;
   1312   *definitely_mismatches = false;
   1313   Label regular_invoke;
   1314 
   1315   // Check whether the expected and actual arguments count match. If not,
    1316   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
   1317   //  r3: actual arguments count
   1318   //  r4: function (passed through to callee)
   1319   //  r5: expected arguments count
   1320 
   1321   // The code below is made a lot easier because the calling code already sets
   1322   // up actual and expected registers according to the contract if values are
   1323   // passed in registers.
   1324 
    1325   // ARM has some sanity checks as per below; consider adding them for PPC.
   1326   //  DCHECK(actual.is_immediate() || actual.reg().is(r3));
   1327   //  DCHECK(expected.is_immediate() || expected.reg().is(r5));
   1328 
   1329   if (expected.is_immediate()) {
   1330     DCHECK(actual.is_immediate());
   1331     mov(r3, Operand(actual.immediate()));
   1332     if (expected.immediate() == actual.immediate()) {
   1333       definitely_matches = true;
   1334     } else {
   1335       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   1336       if (expected.immediate() == sentinel) {
   1337         // Don't worry about adapting arguments for builtins that
    1338         // don't want that done. Skip adaptation code by making it look
   1339         // like we have a match between expected and actual number of
   1340         // arguments.
   1341         definitely_matches = true;
   1342       } else {
   1343         *definitely_mismatches = true;
   1344         mov(r5, Operand(expected.immediate()));
   1345       }
   1346     }
   1347   } else {
   1348     if (actual.is_immediate()) {
   1349       mov(r3, Operand(actual.immediate()));
   1350       cmpi(expected.reg(), Operand(actual.immediate()));
   1351       beq(&regular_invoke);
   1352     } else {
   1353       cmp(expected.reg(), actual.reg());
   1354       beq(&regular_invoke);
   1355     }
   1356   }
   1357 
   1358   if (!definitely_matches) {
   1359     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
   1360     if (flag == CALL_FUNCTION) {
   1361       call_wrapper.BeforeCall(CallSize(adaptor));
   1362       Call(adaptor);
   1363       call_wrapper.AfterCall();
   1364       if (!*definitely_mismatches) {
   1365         b(done);
   1366       }
   1367     } else {
   1368       Jump(adaptor, RelocInfo::CODE_TARGET);
   1369     }
   1370     bind(&regular_invoke);
   1371   }
   1372 }
   1373 
   1374 
   1375 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
   1376                                              const ParameterCount& expected,
   1377                                              const ParameterCount& actual) {
   1378   Label skip_flooding;
   1379   ExternalReference last_step_action =
   1380       ExternalReference::debug_last_step_action_address(isolate());
   1381   STATIC_ASSERT(StepFrame > StepIn);
   1382   mov(r7, Operand(last_step_action));
   1383   LoadByte(r7, MemOperand(r7), r0);
   1384   extsb(r7, r7);
   1385   cmpi(r7, Operand(StepIn));
   1386   blt(&skip_flooding);
   1387   {
   1388     FrameScope frame(this,
   1389                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
   1390     if (expected.is_reg()) {
   1391       SmiTag(expected.reg());
   1392       Push(expected.reg());
   1393     }
   1394     if (actual.is_reg()) {
   1395       SmiTag(actual.reg());
   1396       Push(actual.reg());
   1397     }
   1398     if (new_target.is_valid()) {
   1399       Push(new_target);
   1400     }
   1401     Push(fun, fun);
   1402     CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
   1403     Pop(fun);
   1404     if (new_target.is_valid()) {
   1405       Pop(new_target);
   1406     }
   1407     if (actual.is_reg()) {
   1408       Pop(actual.reg());
   1409       SmiUntag(actual.reg());
   1410     }
   1411     if (expected.is_reg()) {
   1412       Pop(expected.reg());
   1413       SmiUntag(expected.reg());
   1414     }
   1415   }
   1416   bind(&skip_flooding);
   1417 }
   1418 
   1419 
   1420 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   1421                                         const ParameterCount& expected,
   1422                                         const ParameterCount& actual,
   1423                                         InvokeFlag flag,
   1424                                         const CallWrapper& call_wrapper) {
   1425   // You can't call a function without a valid frame.
   1426   DCHECK(flag == JUMP_FUNCTION || has_frame());
   1427   DCHECK(function.is(r4));
   1428   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
   1429 
   1430   if (call_wrapper.NeedsDebugStepCheck()) {
   1431     FloodFunctionIfStepping(function, new_target, expected, actual);
   1432   }
   1433 
   1434   // Clear the new.target register if not given.
   1435   if (!new_target.is_valid()) {
   1436     LoadRoot(r6, Heap::kUndefinedValueRootIndex);
   1437   }
   1438 
   1439   Label done;
   1440   bool definitely_mismatches = false;
   1441   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
   1442                  call_wrapper);
   1443   if (!definitely_mismatches) {
   1444     // We call indirectly through the code field in the function to
   1445     // allow recompilation to take effect without changing any of the
   1446     // call sites.
   1447     Register code = ip;
   1448     LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   1449     if (flag == CALL_FUNCTION) {
   1450       call_wrapper.BeforeCall(CallSize(code));
   1451       CallJSEntry(code);
   1452       call_wrapper.AfterCall();
   1453     } else {
   1454       DCHECK(flag == JUMP_FUNCTION);
   1455       JumpToJSEntry(code);
   1456     }
   1457 
   1458     // Continue here if InvokePrologue handled the invocation itself (via the
   1459     // arguments adaptor) because of mismatched parameter counts.
   1460     bind(&done);
   1461   }
   1462 }
   1463 
   1464 
   1465 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
   1466                                     const ParameterCount& actual,
   1467                                     InvokeFlag flag,
   1468                                     const CallWrapper& call_wrapper) {
   1469   // You can't call a function without a valid frame.
   1470   DCHECK(flag == JUMP_FUNCTION || has_frame());
   1471 
   1472   // Contract with called JS functions requires that function is passed in r4.
   1473   DCHECK(fun.is(r4));
   1474 
   1475   Register expected_reg = r5;
   1476   Register temp_reg = r7;
   1477 
   1478   LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   1479   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
   1480   LoadWordArith(expected_reg,
   1481                 FieldMemOperand(
   1482                     temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
   1483 #if !defined(V8_TARGET_ARCH_PPC64)
   1484   SmiUntag(expected_reg);
   1485 #endif
   1486 
   1487   ParameterCount expected(expected_reg);
   1488   InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
   1489 }
   1490 
   1491 
   1492 void MacroAssembler::InvokeFunction(Register function,
   1493                                     const ParameterCount& expected,
   1494                                     const ParameterCount& actual,
   1495                                     InvokeFlag flag,
   1496                                     const CallWrapper& call_wrapper) {
   1497   // You can't call a function without a valid frame.
   1498   DCHECK(flag == JUMP_FUNCTION || has_frame());
   1499 
   1500   // Contract with called JS functions requires that function is passed in r4.
   1501   DCHECK(function.is(r4));
   1502 
   1503   // Get the function and setup the context.
   1504   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
   1505 
   1506   InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
   1507 }
   1508 
   1509 
   1510 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   1511                                     const ParameterCount& expected,
   1512                                     const ParameterCount& actual,
   1513                                     InvokeFlag flag,
   1514                                     const CallWrapper& call_wrapper) {
   1515   Move(r4, function);
   1516   InvokeFunction(r4, expected, actual, flag, call_wrapper);
   1517 }
   1518 
   1519 
   1520 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
   1521                                           Label* fail) {
   1522   DCHECK(kNotStringTag != 0);
   1523 
   1524   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   1525   lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1526   andi(r0, scratch, Operand(kIsNotStringMask));
   1527   bne(fail, cr0);
   1528 }
   1529 
   1530 
   1531 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
   1532                                       Label* fail) {
   1533   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   1534   lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1535   cmpi(scratch, Operand(LAST_NAME_TYPE));
   1536   bgt(fail);
   1537 }
   1538 
   1539 
   1540 void MacroAssembler::DebugBreak() {
   1541   li(r3, Operand::Zero());
   1542   mov(r4,
   1543       Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
   1544   CEntryStub ces(isolate(), 1);
   1545   DCHECK(AllowThisStubCall(&ces));
   1546   Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
   1547 }
   1548 
   1549 
   1550 void MacroAssembler::PushStackHandler() {
   1551   // Adjust this code if the stack handler layout asserted below changes.
   1552   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
   1553   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
   1554 
   1555   // Link the current handler as the next handler.
   1556   // Preserve r3-r7.
   1557   mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   1558   LoadP(r0, MemOperand(r8));
   1559   push(r0);
   1560 
   1561   // Set this new handler as the current one.
   1562   StoreP(sp, MemOperand(r8));
   1563 }
   1564 
   1565 
   1566 void MacroAssembler::PopStackHandler() {
   1567   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
   1568   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   1569 
   1570   pop(r4);
   1571   mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   1572   StoreP(r4, MemOperand(ip));
   1573 }
   1574 
   1575 
   1576 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   1577                                             Register scratch, Label* miss) {
   1578   Label same_contexts;
   1579 
   1580   DCHECK(!holder_reg.is(scratch));
   1581   DCHECK(!holder_reg.is(ip));
   1582   DCHECK(!scratch.is(ip));
   1583 
   1584   // Load current lexical context from the active StandardFrame, which
   1585   // may require crawling past STUB frames.
   1586   Label load_context;
   1587   Label has_context;
   1588   DCHECK(!ip.is(scratch));
   1589   mr(ip, fp);
   1590   bind(&load_context);
   1591   LoadP(scratch,
   1592         MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
   1593   JumpIfNotSmi(scratch, &has_context);
   1594   LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
   1595   b(&load_context);
   1596   bind(&has_context);
   1597 
   1598 // In debug mode, make sure the lexical context is set.
   1599 #ifdef DEBUG
   1600   cmpi(scratch, Operand::Zero());
   1601   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
   1602 #endif
   1603 
   1604   // Load the native context of the current context.
   1605   LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
   1606 
   1607   // Check the context is a native context.
   1608   if (emit_debug_code()) {
   1609     // Cannot use ip as a temporary in this verification code, because ip is
   1610     // clobbered as part of cmp with an object Operand.
   1611     push(holder_reg);  // Temporarily save holder on the stack.
   1612     // Read the first word and compare to the native_context_map.
   1613     LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
   1614     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
   1615     cmp(holder_reg, ip);
   1616     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
   1617     pop(holder_reg);  // Restore holder.
   1618   }
   1619 
   1620   // Check if both contexts are the same.
   1621   LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   1622   cmp(scratch, ip);
   1623   beq(&same_contexts);
   1624 
   1625   // Check the context is a native context.
   1626   if (emit_debug_code()) {
   1627     // Cannot use ip as a temporary in this verification code, because ip is
   1628     // clobbered as part of cmp with an object Operand.
   1629     push(holder_reg);    // Temporarily save holder on the stack.
   1630     mr(holder_reg, ip);  // Move ip to its holding place.
   1631     LoadRoot(ip, Heap::kNullValueRootIndex);
   1632     cmp(holder_reg, ip);
   1633     Check(ne, kJSGlobalProxyContextShouldNotBeNull);
   1634 
   1635     LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
   1636     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
   1637     cmp(holder_reg, ip);
   1638     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
   1639     // Restoring ip is not needed; it is reloaded below.
   1640     pop(holder_reg);  // Restore holder.
   1641     // Restore ip to holder's context.
   1642     LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   1643   }
   1644 
   1645   // Check that the security token in the calling global object is
   1646   // compatible with the security token in the receiving global
   1647   // object.
   1648   int token_offset =
   1649       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   1650 
   1651   LoadP(scratch, FieldMemOperand(scratch, token_offset));
   1652   LoadP(ip, FieldMemOperand(ip, token_offset));
   1653   cmp(scratch, ip);
   1654   bne(miss);
   1655 
   1656   bind(&same_contexts);
   1657 }
   1658 
   1659 
   1660 // Compute the hash code from the untagged key.  This must be kept in sync with
   1661 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
   1662 // code-stub-hydrogen.cc
   1663 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
   1664   // First of all we assign the hash seed to scratch.
   1665   LoadRoot(scratch, Heap::kHashSeedRootIndex);
   1666   SmiUntag(scratch);
   1667 
   1668   // Xor original key with a seed.
   1669   xor_(t0, t0, scratch);
   1670 
   1671   // Compute the hash code from the untagged key.  This must be kept in sync
   1672   // with ComputeIntegerHash in utils.h.
   1673   //
   1674   // hash = ~hash + (hash << 15);
   1675   notx(scratch, t0);
   1676   slwi(t0, t0, Operand(15));
   1677   add(t0, scratch, t0);
   1678   // hash = hash ^ (hash >> 12);
   1679   srwi(scratch, t0, Operand(12));
   1680   xor_(t0, t0, scratch);
   1681   // hash = hash + (hash << 2);
   1682   slwi(scratch, t0, Operand(2));
   1683   add(t0, t0, scratch);
   1684   // hash = hash ^ (hash >> 4);
   1685   srwi(scratch, t0, Operand(4));
   1686   xor_(t0, t0, scratch);
   1687   // hash = hash * 2057;
   1688   mr(r0, t0);
   1689   slwi(scratch, t0, Operand(3));
   1690   add(t0, t0, scratch);
   1691   slwi(scratch, r0, Operand(11));
   1692   add(t0, t0, scratch);
   1693   // hash = hash ^ (hash >> 16);
   1694   srwi(scratch, t0, Operand(16));
   1695   xor_(t0, t0, scratch);
   1696   // hash & 0x3fffffff
   1697   ExtractBitRange(t0, t0, 29, 0);
   1698 }
   1699 
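// A minimal C++ sketch of the hash the sequence above computes (it must stay
// in sync with ComputeIntegerHash in utils.h); ComputeIntegerHashSketch is a
// hypothetical name, and 'seed' stands for the untagged hash seed loaded above.
#if 0
static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // Same mask as ExtractBitRange(t0, t0, 29, 0).
}
#endif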
   1700 
   1701 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
   1702                                               Register key, Register result,
   1703                                               Register t0, Register t1,
   1704                                               Register t2) {
   1705   // Register use:
   1706   //
   1707   // elements - holds the slow-case elements of the receiver on entry.
   1708   //            Unchanged unless 'result' is the same register.
   1709   //
   1710   // key      - holds the smi key on entry.
   1711   //            Unchanged unless 'result' is the same register.
   1712   //
   1713   // result   - holds the result on exit if the load succeeded.
   1714   //            Allowed to be the same as 'key' or 'elements'.
   1715   //            Unchanged on bailout so 'key' or 'elements' can be used
   1716   //            in further computation.
   1717   //
   1718   // Scratch registers:
   1719   //
   1720   // t0 - holds the untagged key on entry and holds the hash once computed.
   1721   //
   1722   // t1 - used to hold the capacity mask of the dictionary
   1723   //
   1724   // t2 - used for the index into the dictionary.
   1725   Label done;
   1726 
   1727   GetNumberHash(t0, t1);
   1728 
   1729   // Compute the capacity mask.
   1730   LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
   1731   SmiUntag(t1);
   1732   subi(t1, t1, Operand(1));
   1733 
   1734   // Generate an unrolled loop that performs a few probes before giving up.
   1735   for (int i = 0; i < kNumberDictionaryProbes; i++) {
   1736     // Use t2 for index calculations and keep the hash intact in t0.
   1737     mr(t2, t0);
   1738     // Compute the masked index: (hash + i + i * i) & mask.
   1739     if (i > 0) {
   1740       addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
   1741     }
   1742     and_(t2, t2, t1);
   1743 
   1744     // Scale the index by multiplying by the element size.
   1745     DCHECK(SeededNumberDictionary::kEntrySize == 3);
   1746     slwi(ip, t2, Operand(1));
   1747     add(t2, t2, ip);  // t2 = t2 * 3
   1748 
   1749     // Check if the key is identical to the name.
   1750     slwi(t2, t2, Operand(kPointerSizeLog2));
   1751     add(t2, elements, t2);
   1752     LoadP(ip,
   1753           FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
   1754     cmp(key, ip);
   1755     if (i != kNumberDictionaryProbes - 1) {
   1756       beq(&done);
   1757     } else {
   1758       bne(miss);
   1759     }
   1760   }
   1761 
   1762   bind(&done);
   1763   // Check that the value is a field property.
   1764   // t2: elements + (index * kPointerSize)
   1765   const int kDetailsOffset =
   1766       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   1767   LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
   1768   LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
   1769   DCHECK_EQ(DATA, 0);
   1770   and_(r0, t1, ip, SetRC);
   1771   bne(miss, cr0);
   1772 
   1773   // Get the value at the masked, scaled index and return.
   1774   const int kValueOffset =
   1775       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   1776   LoadP(result, FieldMemOperand(t2, kValueOffset));
   1777 }
   1778 
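// A minimal sketch of the probe sequence the unrolled loop above emits,
// assuming a hypothetical EntrySketch layout of three pointer-sized slots
// (key, value, details); the real code additionally checks the details word
// for a DATA property before loading the value.
#if 0
struct EntrySketch { intptr_t key, value, details; };  // kEntrySize == 3.

static bool ProbeNumberDictionarySketch(const EntrySketch* entries,
                                        uint32_t capacity, uint32_t hash,
                                        intptr_t key, intptr_t* value_out) {
  uint32_t mask = capacity - 1;
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    uint32_t index = (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask;
    if (entries[index].key == key) {
      *value_out = entries[index].value;
      return true;
    }
  }
  return false;  // Corresponds to branching to 'miss'.
}
#endif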
   1779 
   1780 void MacroAssembler::Allocate(int object_size, Register result,
   1781                               Register scratch1, Register scratch2,
   1782                               Label* gc_required, AllocationFlags flags) {
   1783   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   1784   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   1785   if (!FLAG_inline_new) {
   1786     if (emit_debug_code()) {
   1787       // Trash the registers to simulate an allocation failure.
   1788       li(result, Operand(0x7091));
   1789       li(scratch1, Operand(0x7191));
   1790       li(scratch2, Operand(0x7291));
   1791     }
   1792     b(gc_required);
   1793     return;
   1794   }
   1795 
   1796   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
   1797 
   1798   // Make object size into bytes.
   1799   if ((flags & SIZE_IN_WORDS) != 0) {
   1800     object_size *= kPointerSize;
   1801   }
   1802   DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
   1803 
   1804   // Check relative positions of allocation top and limit addresses.
   1805   ExternalReference allocation_top =
   1806       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   1807   ExternalReference allocation_limit =
   1808       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   1809 
   1810   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
   1811   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
   1812   DCHECK((limit - top) == kPointerSize);
   1813 
   1814   // Set up allocation top address register.
   1815   Register top_address = scratch1;
   1816   // This code stores a temporary value in ip. This is OK, as the code below
   1817   // does not need ip for implicit literal generation.
   1818   Register alloc_limit = ip;
   1819   Register result_end = scratch2;
   1820   mov(top_address, Operand(allocation_top));
   1821 
   1822   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   1823     // Load allocation top into result and allocation limit into ip.
   1824     LoadP(result, MemOperand(top_address));
   1825     LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
   1826   } else {
   1827     if (emit_debug_code()) {
   1828       // Assert that result actually contains top on entry.
   1829       LoadP(alloc_limit, MemOperand(top_address));
   1830       cmp(result, alloc_limit);
   1831       Check(eq, kUnexpectedAllocationTop);
   1832     }
   1833     // Load allocation limit. Result already contains allocation top.
   1834     LoadP(alloc_limit, MemOperand(top_address, limit - top));
   1835   }
   1836 
   1837   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   1838     // Align the next allocation. Storing the filler map without checking top is
   1839     // safe in new-space because the limit of the heap is aligned there.
   1840 #if V8_TARGET_ARCH_PPC64
   1841     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
   1842 #else
   1843     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
   1844     andi(result_end, result, Operand(kDoubleAlignmentMask));
   1845     Label aligned;
   1846     beq(&aligned, cr0);
   1847     if ((flags & PRETENURE) != 0) {
   1848       cmpl(result, alloc_limit);
   1849       bge(gc_required);
   1850     }
   1851     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
   1852     stw(result_end, MemOperand(result));
   1853     addi(result, result, Operand(kDoubleSize / 2));
   1854     bind(&aligned);
   1855 #endif
   1856   }
   1857 
   1858   // Calculate new top and bail out if new space is exhausted. Use result
   1859   // to calculate the new top.
   1860   sub(r0, alloc_limit, result);
   1861   if (is_int16(object_size)) {
   1862     cmpi(r0, Operand(object_size));
   1863     blt(gc_required);
   1864     addi(result_end, result, Operand(object_size));
   1865   } else {
   1866     Cmpi(r0, Operand(object_size), result_end);
   1867     blt(gc_required);
   1868     add(result_end, result, result_end);
   1869   }
   1870 
   1871   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
   1872     // The top pointer is not updated for allocation folding dominators.
   1873     StoreP(result_end, MemOperand(top_address));
   1874   }
   1875 
   1876   // Tag object.
   1877   addi(result, result, Operand(kHeapObjectTag));
   1878 }
   1879 
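// A minimal sketch of the bump-pointer allocation emitted above, assuming
// hypothetical raw top/limit pointers; the real code reads them through
// ExternalReferences, handles DOUBLE_ALIGNMENT, and branches to 'gc_required'
// instead of returning null.
#if 0
static Address AllocateSketch(Address* top, Address* limit,
                              int size_in_bytes) {
  if (*limit - *top < size_in_bytes) return nullptr;  // New space exhausted.
  Address result = *top;
  *top = result + size_in_bytes;   // Publish the new allocation top.
  return result + kHeapObjectTag;  // Tag the result as a heap object.
}
#endif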
   1880 
   1881 void MacroAssembler::Allocate(Register object_size, Register result,
   1882                               Register result_end, Register scratch,
   1883                               Label* gc_required, AllocationFlags flags) {
   1884   DCHECK((flags & ALLOCATION_FOLDED) == 0);
   1885   if (!FLAG_inline_new) {
   1886     if (emit_debug_code()) {
   1887       // Trash the registers to simulate an allocation failure.
   1888       li(result, Operand(0x7091));
   1889       li(scratch, Operand(0x7191));
   1890       li(result_end, Operand(0x7291));
   1891     }
   1892     b(gc_required);
   1893     return;
   1894   }
   1895 
   1896   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
   1897   // is not specified. Other registers must not overlap.
   1898   DCHECK(!AreAliased(object_size, result, scratch, ip));
   1899   DCHECK(!AreAliased(result_end, result, scratch, ip));
   1900   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
   1901 
   1902   // Check relative positions of allocation top and limit addresses.
   1903   ExternalReference allocation_top =
   1904       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   1905   ExternalReference allocation_limit =
   1906       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   1907   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
   1908   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
   1909   DCHECK((limit - top) == kPointerSize);
   1910 
   1911   // Set up allocation top address and allocation limit registers.
   1912   Register top_address = scratch;
   1913   // This code stores a temporary value in ip. This is OK, as the code below
   1914   // does not need ip for implicit literal generation.
   1915   Register alloc_limit = ip;
   1916   mov(top_address, Operand(allocation_top));
   1917 
   1918   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   1919     // Load allocation top into result and allocation limit into alloc_limit.
   1920     LoadP(result, MemOperand(top_address));
   1921     LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
   1922   } else {
   1923     if (emit_debug_code()) {
   1924       // Assert that result actually contains top on entry.
   1925       LoadP(alloc_limit, MemOperand(top_address));
   1926       cmp(result, alloc_limit);
   1927       Check(eq, kUnexpectedAllocationTop);
   1928     }
   1929     // Load allocation limit. Result already contains allocation top.
   1930     LoadP(alloc_limit, MemOperand(top_address, limit - top));
   1931   }
   1932 
   1933   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   1934     // Align the next allocation. Storing the filler map without checking top is
   1935     // safe in new-space because the limit of the heap is aligned there.
   1936 #if V8_TARGET_ARCH_PPC64
   1937     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
   1938 #else
   1939     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
   1940     andi(result_end, result, Operand(kDoubleAlignmentMask));
   1941     Label aligned;
   1942     beq(&aligned, cr0);
   1943     if ((flags & PRETENURE) != 0) {
   1944       cmpl(result, alloc_limit);
   1945       bge(gc_required);
   1946     }
   1947     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
   1948     stw(result_end, MemOperand(result));
   1949     addi(result, result, Operand(kDoubleSize / 2));
   1950     bind(&aligned);
   1951 #endif
   1952   }
   1953 
   1954   // Calculate new top and bail out if new space is exhausted. Use result
   1955   // to calculate the new top. Object size may be in words so a shift is
   1956   // required to get the number of bytes.
   1957   sub(r0, alloc_limit, result);
   1958   if ((flags & SIZE_IN_WORDS) != 0) {
   1959     ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
   1960     cmp(r0, result_end);
   1961     blt(gc_required);
   1962     add(result_end, result, result_end);
   1963   } else {
   1964     cmp(r0, object_size);
   1965     blt(gc_required);
   1966     add(result_end, result, object_size);
   1967   }
   1968 
   1969   // Update allocation top. result temporarily holds the new top.
   1970   if (emit_debug_code()) {
   1971     andi(r0, result_end, Operand(kObjectAlignmentMask));
   1972     Check(eq, kUnalignedAllocationInNewSpace, cr0);
   1973   }
   1974   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
   1975     // The top pointer is not updated for allocation folding dominators.
   1976     StoreP(result_end, MemOperand(top_address));
   1977   }
   1978 
   1979   // Tag object.
   1980   addi(result, result, Operand(kHeapObjectTag));
   1981 }
   1982 
   1983 void MacroAssembler::FastAllocate(Register object_size, Register result,
   1984                                   Register result_end, Register scratch,
   1985                                   AllocationFlags flags) {
   1986   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
   1987   // is not specified. Other registers must not overlap.
   1988   DCHECK(!AreAliased(object_size, result, scratch, ip));
   1989   DCHECK(!AreAliased(result_end, result, scratch, ip));
   1990   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
   1991 
   1992   ExternalReference allocation_top =
   1993       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   1994 
   1995   Register top_address = scratch;
   1996   mov(top_address, Operand(allocation_top));
   1997   LoadP(result, MemOperand(top_address));
   1998 
   1999   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   2000     // Align the next allocation. Storing the filler map without checking top is
   2001     // safe in new-space because the limit of the heap is aligned there.
   2002 #if V8_TARGET_ARCH_PPC64
   2003     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
   2004 #else
   2005     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
   2006     andi(result_end, result, Operand(kDoubleAlignmentMask));
   2007     Label aligned;
   2008     beq(&aligned);
   2009     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
   2010     stw(result_end, MemOperand(result));
   2011     addi(result, result, Operand(kDoubleSize / 2));
   2012     bind(&aligned);
   2013 #endif
   2014   }
   2015 
   2016   // Calculate new top using result. Object size may be in words so a shift is
   2017   // required to get the number of bytes.
   2018   if ((flags & SIZE_IN_WORDS) != 0) {
   2019     ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
   2020     add(result_end, result, result_end);
   2021   } else {
   2022     add(result_end, result, object_size);
   2023   }
   2024 
   2025   // Update allocation top. result temporarily holds the new top.
   2026   if (emit_debug_code()) {
   2027     andi(r0, result_end, Operand(kObjectAlignmentMask));
   2028     Check(eq, kUnalignedAllocationInNewSpace, cr0);
   2029   }
   2030   StoreP(result_end, MemOperand(top_address));
   2031 
   2032   // Tag object.
   2033   addi(result, result, Operand(kHeapObjectTag));
   2034 }
   2035 
   2036 void MacroAssembler::FastAllocate(int object_size, Register result,
   2037                                   Register scratch1, Register scratch2,
   2038                                   AllocationFlags flags) {
   2039   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   2040   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
   2041 
   2042   // Make object size into bytes.
   2043   if ((flags & SIZE_IN_WORDS) != 0) {
   2044     object_size *= kPointerSize;
   2045   }
   2046   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
   2047 
   2048   ExternalReference allocation_top =
   2049       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   2050 
   2051   // Set up allocation top address register.
   2052   Register top_address = scratch1;
   2053   Register result_end = scratch2;
   2054   mov(top_address, Operand(allocation_top));
   2055   LoadP(result, MemOperand(top_address));
   2056 
   2057   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   2058     // Align the next allocation. Storing the filler map without checking top is
   2059     // safe in new-space because the limit of the heap is aligned there.
   2060 #if V8_TARGET_ARCH_PPC64
   2061     STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
   2062 #else
   2063     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
   2064     andi(result_end, result, Operand(kDoubleAlignmentMask));
   2065     Label aligned;
   2066     beq(&aligned);
   2067     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
   2068     stw(result_end, MemOperand(result));
   2069     addi(result, result, Operand(kDoubleSize / 2));
   2070     bind(&aligned);
   2071 #endif
   2072   }
   2073 
   2074   // Calculate new top using result.
   2075   Add(result_end, result, object_size, r0);
   2076 
   2077   // The top pointer is not updated for allocation folding dominators.
   2078   StoreP(result_end, MemOperand(top_address));
   2079 
   2080   // Tag object.
   2081   addi(result, result, Operand(kHeapObjectTag));
   2082 }
   2083 
   2084 
   2085 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
   2086                                            Register scratch1, Register scratch2,
   2087                                            Register scratch3,
   2088                                            Label* gc_required) {
   2089   // Calculate the number of bytes needed for the characters in the string while
   2090   // observing object alignment.
   2091   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   2092   slwi(scratch1, length, Operand(1));  // Length in bytes, not chars.
   2093   addi(scratch1, scratch1,
   2094        Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
   2095   mov(r0, Operand(~kObjectAlignmentMask));
   2096   and_(scratch1, scratch1, r0);
   2097 
   2098   // Allocate two-byte string in new space.
   2099   Allocate(scratch1, result, scratch2, scratch3, gc_required,
   2100            NO_ALLOCATION_FLAGS);
   2101 
   2102   // Set the map, length and hash field.
   2103   InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
   2104                       scratch2);
   2105 }
   2106 
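// A minimal sketch of the size computation above: the two-byte character
// payload plus the header is rounded up to object alignment. Plain C++ for
// illustration only; the generated code does the same with slwi/addi/and_.
#if 0
static int SeqTwoByteStringSizeSketch(int length) {
  int size_in_bytes = 2 * length + SeqTwoByteString::kHeaderSize;
  return (size_in_bytes + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}
#endif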
   2107 
   2108 void MacroAssembler::AllocateOneByteString(Register result, Register length,
   2109                                            Register scratch1, Register scratch2,
   2110                                            Register scratch3,
   2111                                            Label* gc_required) {
   2112   // Calculate the number of bytes needed for the characters in the string while
   2113   // observing object alignment.
   2114   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   2115   DCHECK(kCharSize == 1);
   2116   addi(scratch1, length,
   2117        Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
   2118   li(r0, Operand(~kObjectAlignmentMask));
   2119   and_(scratch1, scratch1, r0);
   2120 
   2121   // Allocate one-byte string in new space.
   2122   Allocate(scratch1, result, scratch2, scratch3, gc_required,
   2123            NO_ALLOCATION_FLAGS);
   2124 
   2125   // Set the map, length and hash field.
   2126   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
   2127                       scratch1, scratch2);
   2128 }
   2129 
   2130 
   2131 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
   2132                                                Register scratch1,
   2133                                                Register scratch2,
   2134                                                Label* gc_required) {
   2135   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   2136            NO_ALLOCATION_FLAGS);
   2137 
   2138   InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
   2139                       scratch2);
   2140 }
   2141 
   2142 
   2143 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
   2144                                                Register scratch1,
   2145                                                Register scratch2,
   2146                                                Label* gc_required) {
   2147   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   2148            NO_ALLOCATION_FLAGS);
   2149 
   2150   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
   2151                       scratch1, scratch2);
   2152 }
   2153 
   2154 
   2155 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
   2156                                                  Register length,
   2157                                                  Register scratch1,
   2158                                                  Register scratch2,
   2159                                                  Label* gc_required) {
   2160   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   2161            NO_ALLOCATION_FLAGS);
   2162 
   2163   InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
   2164                       scratch2);
   2165 }
   2166 
   2167 
   2168 void MacroAssembler::AllocateOneByteSlicedString(Register result,
   2169                                                  Register length,
   2170                                                  Register scratch1,
   2171                                                  Register scratch2,
   2172                                                  Label* gc_required) {
   2173   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   2174            NO_ALLOCATION_FLAGS);
   2175 
   2176   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
   2177                       scratch1, scratch2);
   2178 }
   2179 
   2180 
   2181 void MacroAssembler::CompareObjectType(Register object, Register map,
   2182                                        Register type_reg, InstanceType type) {
   2183   const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
   2184 
   2185   LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
   2186   CompareInstanceType(map, temp, type);
   2187 }
   2188 
   2189 
   2190 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
   2191                                          InstanceType type) {
   2192   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
   2193   STATIC_ASSERT(LAST_TYPE < 256);
   2194   lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2195   cmpi(type_reg, Operand(type));
   2196 }
   2197 
   2198 
   2199 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
   2200   DCHECK(!obj.is(r0));
   2201   LoadRoot(r0, index);
   2202   cmp(obj, r0);
   2203 }
   2204 
   2205 
   2206 void MacroAssembler::CheckFastElements(Register map, Register scratch,
   2207                                        Label* fail) {
   2208   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2209   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2210   STATIC_ASSERT(FAST_ELEMENTS == 2);
   2211   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2212   lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   2213   STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
   2214   cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
   2215   bgt(fail);
   2216 }
   2217 
   2218 
   2219 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
   2220                                              Label* fail) {
   2221   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2222   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2223   STATIC_ASSERT(FAST_ELEMENTS == 2);
   2224   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2225   lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   2226   cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
   2227   ble(fail);
   2228   cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
   2229   bgt(fail);
   2230 }
   2231 
   2232 
   2233 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
   2234                                           Label* fail) {
   2235   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2236   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2237   lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   2238   cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
   2239   bgt(fail);
   2240 }
   2241 
   2242 
   2243 void MacroAssembler::StoreNumberToDoubleElements(
   2244     Register value_reg, Register key_reg, Register elements_reg,
   2245     Register scratch1, DoubleRegister double_scratch, Label* fail,
   2246     int elements_offset) {
   2247   DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
   2248   Label smi_value, store;
   2249 
   2250   // Handle smi values specially.
   2251   JumpIfSmi(value_reg, &smi_value);
   2252 
   2253   // Ensure that the object is a heap number
   2254   CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
   2255            DONT_DO_SMI_CHECK);
   2256 
   2257   lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
   2258   // Double value, turn potential sNaN into qNaN.
   2259   CanonicalizeNaN(double_scratch);
   2260   b(&store);
   2261 
   2262   bind(&smi_value);
   2263   SmiToDouble(double_scratch, value_reg);
   2264 
   2265   bind(&store);
   2266   SmiToDoubleArrayOffset(scratch1, key_reg);
   2267   add(scratch1, elements_reg, scratch1);
   2268   stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
   2269                                                      elements_offset));
   2270 }
   2271 
   2272 
   2273 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
   2274                                             Register right,
   2275                                             Register overflow_dst,
   2276                                             Register scratch) {
   2277   DCHECK(!dst.is(overflow_dst));
   2278   DCHECK(!dst.is(scratch));
   2279   DCHECK(!overflow_dst.is(scratch));
   2280   DCHECK(!overflow_dst.is(left));
   2281   DCHECK(!overflow_dst.is(right));
   2282 
   2283   bool left_is_right = left.is(right);
   2284   RCBit xorRC = left_is_right ? SetRC : LeaveRC;
   2285 
   2286   // C = A + B; C overflows if A and B have the same sign and C's sign differs from A's.
   2287   if (dst.is(left)) {
   2288     mr(scratch, left);            // Preserve left.
   2289     add(dst, left, right);        // Left is overwritten.
   2290     xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
   2291     if (!left_is_right) xor_(scratch, dst, right);
   2292   } else if (dst.is(right)) {
   2293     mr(scratch, right);           // Preserve right.
   2294     add(dst, left, right);        // Right is overwritten.
   2295     xor_(overflow_dst, dst, left, xorRC);
   2296     if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
   2297   } else {
   2298     add(dst, left, right);
   2299     xor_(overflow_dst, dst, left, xorRC);
   2300     if (!left_is_right) xor_(scratch, dst, right);
   2301   }
   2302   if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
   2303 }
   2304 
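// A minimal sketch of the sign-bit trick used above: for C = A + B, overflow
// occurred iff A and B have the same sign and C's sign differs from A's. The
// unsigned casts only avoid undefined behaviour in this illustration.
#if 0
static bool AddOverflowsSketch(intptr_t a, intptr_t b) {
  intptr_t c = static_cast<intptr_t>(static_cast<uintptr_t>(a) +
                                     static_cast<uintptr_t>(b));
  return ((c ^ a) & (c ^ b)) < 0;  // Sign bit set => overflow.
}
#endif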
   2305 
   2306 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
   2307                                             intptr_t right,
   2308                                             Register overflow_dst,
   2309                                             Register scratch) {
   2310   Register original_left = left;
   2311   DCHECK(!dst.is(overflow_dst));
   2312   DCHECK(!dst.is(scratch));
   2313   DCHECK(!overflow_dst.is(scratch));
   2314   DCHECK(!overflow_dst.is(left));
   2315 
   2316   // C = A + B; C overflows if A and B have the same sign and C's sign differs from A's.
   2317   if (dst.is(left)) {
   2318     // Preserve left.
   2319     original_left = overflow_dst;
   2320     mr(original_left, left);
   2321   }
   2322   Add(dst, left, right, scratch);
   2323   xor_(overflow_dst, dst, original_left);
   2324   if (right >= 0) {
   2325     and_(overflow_dst, overflow_dst, dst, SetRC);
   2326   } else {
   2327     andc(overflow_dst, overflow_dst, dst, SetRC);
   2328   }
   2329 }
   2330 
   2331 
   2332 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
   2333                                             Register right,
   2334                                             Register overflow_dst,
   2335                                             Register scratch) {
   2336   DCHECK(!dst.is(overflow_dst));
   2337   DCHECK(!dst.is(scratch));
   2338   DCHECK(!overflow_dst.is(scratch));
   2339   DCHECK(!overflow_dst.is(left));
   2340   DCHECK(!overflow_dst.is(right));
   2341 
   2342   // C = A - B; C overflows if A and B have different signs and C's sign differs from A's.
   2343   if (dst.is(left)) {
   2344     mr(scratch, left);      // Preserve left.
   2345     sub(dst, left, right);  // Left is overwritten.
   2346     xor_(overflow_dst, dst, scratch);
   2347     xor_(scratch, scratch, right);
   2348     and_(overflow_dst, overflow_dst, scratch, SetRC);
   2349   } else if (dst.is(right)) {
   2350     mr(scratch, right);     // Preserve right.
   2351     sub(dst, left, right);  // Right is overwritten.
   2352     xor_(overflow_dst, dst, left);
   2353     xor_(scratch, left, scratch);
   2354     and_(overflow_dst, overflow_dst, scratch, SetRC);
   2355   } else {
   2356     sub(dst, left, right);
   2357     xor_(overflow_dst, dst, left);
   2358     xor_(scratch, left, right);
   2359     and_(overflow_dst, scratch, overflow_dst, SetRC);
   2360   }
   2361 }
   2362 
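// The matching sketch for subtraction: for C = A - B, overflow occurred iff
// A and B have different signs and C's sign differs from A's.
#if 0
static bool SubOverflowsSketch(intptr_t a, intptr_t b) {
  intptr_t c = static_cast<intptr_t>(static_cast<uintptr_t>(a) -
                                     static_cast<uintptr_t>(b));
  return ((c ^ a) & (a ^ b)) < 0;  // Sign bit set => overflow.
}
#endif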
   2363 
   2364 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
   2365                                 Label* early_success) {
   2366   LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   2367   CompareMap(scratch, map, early_success);
   2368 }
   2369 
   2370 
   2371 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
   2372                                 Label* early_success) {
   2373   mov(r0, Operand(map));
   2374   cmp(obj_map, r0);
   2375 }
   2376 
   2377 
   2378 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
   2379                               Label* fail, SmiCheckType smi_check_type) {
   2380   if (smi_check_type == DO_SMI_CHECK) {
   2381     JumpIfSmi(obj, fail);
   2382   }
   2383 
   2384   Label success;
   2385   CompareMap(obj, scratch, map, &success);
   2386   bne(fail);
   2387   bind(&success);
   2388 }
   2389 
   2390 
   2391 void MacroAssembler::CheckMap(Register obj, Register scratch,
   2392                               Heap::RootListIndex index, Label* fail,
   2393                               SmiCheckType smi_check_type) {
   2394   if (smi_check_type == DO_SMI_CHECK) {
   2395     JumpIfSmi(obj, fail);
   2396   }
   2397   LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   2398   LoadRoot(r0, index);
   2399   cmp(scratch, r0);
   2400   bne(fail);
   2401 }
   2402 
   2403 
   2404 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
   2405                                      Register scratch2, Handle<WeakCell> cell,
   2406                                      Handle<Code> success,
   2407                                      SmiCheckType smi_check_type) {
   2408   Label fail;
   2409   if (smi_check_type == DO_SMI_CHECK) {
   2410     JumpIfSmi(obj, &fail);
   2411   }
   2412   LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
   2413   CmpWeakValue(scratch1, cell, scratch2);
   2414   Jump(success, RelocInfo::CODE_TARGET, eq);
   2415   bind(&fail);
   2416 }
   2417 
   2418 
   2419 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
   2420                                   Register scratch, CRegister cr) {
   2421   mov(scratch, Operand(cell));
   2422   LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
   2423   cmp(value, scratch, cr);
   2424 }
   2425 
   2426 
   2427 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
   2428   mov(value, Operand(cell));
   2429   LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
   2430 }
   2431 
   2432 
   2433 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
   2434                                    Label* miss) {
   2435   GetWeakValue(value, cell);
   2436   JumpIfSmi(value, miss);
   2437 }
   2438 
   2439 
   2440 void MacroAssembler::GetMapConstructor(Register result, Register map,
   2441                                        Register temp, Register temp2) {
   2442   Label done, loop;
   2443   LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
   2444   bind(&loop);
   2445   JumpIfSmi(result, &done);
   2446   CompareObjectType(result, temp, temp2, MAP_TYPE);
   2447   bne(&done);
   2448   LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
   2449   b(&loop);
   2450   bind(&done);
   2451 }
   2452 
   2453 
   2454 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
   2455                                              Register scratch, Label* miss) {
   2456   // Get the prototype or initial map from the function.
   2457   LoadP(result,
   2458         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2459 
   2460   // If the prototype or initial map is the hole, don't return it and
   2461   // simply miss the cache instead. This will allow us to allocate a
   2462   // prototype object on-demand in the runtime system.
   2463   LoadRoot(r0, Heap::kTheHoleValueRootIndex);
   2464   cmp(result, r0);
   2465   beq(miss);
   2466 
   2467   // If the function does not have an initial map, we're done.
   2468   Label done;
   2469   CompareObjectType(result, scratch, scratch, MAP_TYPE);
   2470   bne(&done);
   2471 
   2472   // Get the prototype from the initial map.
   2473   LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2474 
   2475   // All done.
   2476   bind(&done);
   2477 }
   2478 
   2479 
   2480 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
   2481                               Condition cond) {
   2482   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   2483   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
   2484 }
   2485 
   2486 
   2487 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
   2488   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
   2489 }
   2490 
   2491 
   2492 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
   2493   return has_frame_ || !stub->SometimesSetsUpAFrame();
   2494 }
   2495 
   2496 
   2497 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   2498   // If the hash field contains an array index, pick it out. The assert checks
   2499   // that the constants for the maximum number of digits for an array index
   2500   // cached in the hash field and the number of bits reserved for it do not
   2501   // conflict.
   2502   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
   2503          (1 << String::kArrayIndexValueBits));
   2504   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
   2505 }
   2506 
   2507 
   2508 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
   2509   SmiUntag(ip, smi);
   2510   ConvertIntToDouble(ip, value);
   2511 }
   2512 
   2513 
   2514 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
   2515                                        Register scratch1, Register scratch2,
   2516                                        DoubleRegister double_scratch) {
   2517   TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
   2518 }
   2519 
   2520 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
   2521                                            Register scratch1,
   2522                                            Register scratch2) {
   2523 #if V8_TARGET_ARCH_PPC64
   2524   MovDoubleToInt64(scratch1, input);
   2525   rotldi(scratch1, scratch1, 1);
   2526   cmpi(scratch1, Operand(1));
   2527 #else
   2528   MovDoubleToInt64(scratch1, scratch2, input);
   2529   Label done;
   2530   cmpi(scratch2, Operand::Zero());
   2531   bne(&done);
   2532   lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
   2533   cmp(scratch1, scratch2);
   2534   bind(&done);
   2535 #endif
   2536 }
   2537 
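// The 64-bit path above relies on -0.0 having the bit pattern
// 0x8000000000000000: rotated left by one it becomes exactly 1. A minimal
// sketch of the same test (needs <cstring> for memcpy):
#if 0
static bool IsMinusZeroSketch(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return bits == 0x8000000000000000ULL;
}
#endif
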
   2538 void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
   2539 #if V8_TARGET_ARCH_PPC64
   2540   MovDoubleToInt64(scratch, input);
   2541 #else
   2542   MovDoubleHighToInt(scratch, input);
   2543 #endif
   2544   cmpi(scratch, Operand::Zero());
   2545 }
   2546 
   2547 void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
   2548 #if V8_TARGET_ARCH_PPC64
   2549   LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
   2550 #else
   2551   lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
   2552 #endif
   2553   cmpi(scratch, Operand::Zero());
   2554 }
   2555 
   2556 void MacroAssembler::TryDoubleToInt32Exact(Register result,
   2557                                            DoubleRegister double_input,
   2558                                            Register scratch,
   2559                                            DoubleRegister double_scratch) {
   2560   Label done;
   2561   DCHECK(!double_input.is(double_scratch));
   2562 
   2563   ConvertDoubleToInt64(double_input,
   2564 #if !V8_TARGET_ARCH_PPC64
   2565                        scratch,
   2566 #endif
   2567                        result, double_scratch);
   2568 
   2569 #if V8_TARGET_ARCH_PPC64
   2570   TestIfInt32(result, r0);
   2571 #else
   2572   TestIfInt32(scratch, result, r0);
   2573 #endif
   2574   bne(&done);
   2575 
   2576   // Convert back and compare.
   2577   fcfid(double_scratch, double_scratch);
   2578   fcmpu(double_scratch, double_input);
   2579   bind(&done);
   2580 }
   2581 
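// A minimal sketch of the exactness check above: convert to an integer,
// convert back, and compare with the original double. For illustration this
// assumes 'value' is finite and representable in int64_t.
#if 0
static bool DoubleIsInt32Sketch(double value, int32_t* out) {
  int64_t as_int = static_cast<int64_t>(value);  // Truncate toward zero.
  if (as_int < INT32_MIN || as_int > INT32_MAX) return false;
  if (static_cast<double>(as_int) != value) return false;  // Inexact.
  *out = static_cast<int32_t>(as_int);
  return true;
}
#endif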
   2582 
   2583 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
   2584                                    Register input_high, Register scratch,
   2585                                    DoubleRegister double_scratch, Label* done,
   2586                                    Label* exact) {
   2587   DCHECK(!result.is(input_high));
   2588   DCHECK(!double_input.is(double_scratch));
   2589   Label exception;
   2590 
   2591   MovDoubleHighToInt(input_high, double_input);
   2592 
   2593   // Test for NaN/Inf
   2594   ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
   2595   cmpli(result, Operand(0x7ff));
   2596   beq(&exception);
   2597 
   2598   // Convert (rounding to -Inf)
   2599   ConvertDoubleToInt64(double_input,
   2600 #if !V8_TARGET_ARCH_PPC64
   2601                        scratch,
   2602 #endif
   2603                        result, double_scratch, kRoundToMinusInf);
   2604 
   2605 // Test for overflow
   2606 #if V8_TARGET_ARCH_PPC64
   2607   TestIfInt32(result, r0);
   2608 #else
   2609   TestIfInt32(scratch, result, r0);
   2610 #endif
   2611   bne(&exception);
   2612 
   2613   // Test for exactness
   2614   fcfid(double_scratch, double_scratch);
   2615   fcmpu(double_scratch, double_input);
   2616   beq(exact);
   2617   b(done);
   2618 
   2619   bind(&exception);
   2620 }
   2621 
   2622 
   2623 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
   2624                                                 DoubleRegister double_input,
   2625                                                 Label* done) {
   2626   DoubleRegister double_scratch = kScratchDoubleReg;
   2627 #if !V8_TARGET_ARCH_PPC64
   2628   Register scratch = ip;
   2629 #endif
   2630 
   2631   ConvertDoubleToInt64(double_input,
   2632 #if !V8_TARGET_ARCH_PPC64
   2633                        scratch,
   2634 #endif
   2635                        result, double_scratch);
   2636 
   2637 // Test for overflow
   2638 #if V8_TARGET_ARCH_PPC64
   2639   TestIfInt32(result, r0);
   2640 #else
   2641   TestIfInt32(scratch, result, r0);
   2642 #endif
   2643   beq(done);
   2644 }
   2645 
   2646 
   2647 void MacroAssembler::TruncateDoubleToI(Register result,
   2648                                        DoubleRegister double_input) {
   2649   Label done;
   2650 
   2651   TryInlineTruncateDoubleToI(result, double_input, &done);
   2652 
   2653   // If we fell through, the inline version didn't succeed; call the stub instead.
   2654   mflr(r0);
   2655   push(r0);
   2656   // Put input on stack.
   2657   stfdu(double_input, MemOperand(sp, -kDoubleSize));
   2658 
   2659   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
   2660   CallStub(&stub);
   2661 
   2662   addi(sp, sp, Operand(kDoubleSize));
   2663   pop(r0);
   2664   mtlr(r0);
   2665 
   2666   bind(&done);
   2667 }
   2668 
   2669 
   2670 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
   2671   Label done;
   2672   DoubleRegister double_scratch = kScratchDoubleReg;
   2673   DCHECK(!result.is(object));
   2674 
   2675   lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
   2676   TryInlineTruncateDoubleToI(result, double_scratch, &done);
   2677 
   2678   // If we fell through, the inline version didn't succeed; call the stub instead.
   2679   mflr(r0);
   2680   push(r0);
   2681   DoubleToIStub stub(isolate(), object, result,
   2682                      HeapNumber::kValueOffset - kHeapObjectTag, true, true);
   2683   CallStub(&stub);
   2684   pop(r0);
   2685   mtlr(r0);
   2686 
   2687   bind(&done);
   2688 }
   2689 
   2690 
   2691 void MacroAssembler::TruncateNumberToI(Register object, Register result,
   2692                                        Register heap_number_map,
   2693                                        Register scratch1, Label* not_number) {
   2694   Label done;
   2695   DCHECK(!result.is(object));
   2696 
   2697   UntagAndJumpIfSmi(result, object, &done);
   2698   JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
   2699   TruncateHeapNumberToI(result, object);
   2700 
   2701   bind(&done);
   2702 }
   2703 
   2704 
   2705 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
   2706                                          int num_least_bits) {
   2707 #if V8_TARGET_ARCH_PPC64
   2708   rldicl(dst, src, kBitsPerPointer - kSmiShift,
   2709          kBitsPerPointer - num_least_bits);
   2710 #else
   2711   rlwinm(dst, src, kBitsPerPointer - kSmiShift,
   2712          kBitsPerPointer - num_least_bits, 31);
   2713 #endif
   2714 }
   2715 
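// A minimal sketch of what the rotate-and-mask above computes: untag the smi
// and keep only the requested number of low bits.
#if 0
static intptr_t LeastBitsFromSmiSketch(intptr_t tagged_smi,
                                       int num_least_bits) {
  return (tagged_smi >> kSmiShift) &
         ((static_cast<intptr_t>(1) << num_least_bits) - 1);
}
#endif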
   2716 
   2717 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
   2718                                            int num_least_bits) {
   2719   rlwinm(dst, src, 0, 32 - num_least_bits, 31);
   2720 }
   2721 
   2722 
   2723 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
   2724                                  SaveFPRegsMode save_doubles) {
   2725   // All parameters are on the stack.  r3 has the return value after call.
   2726 
   2727   // If the expected number of arguments of the runtime function is
   2728   // constant, we check that the actual number of arguments matches the
   2729   // expectation.
   2730   CHECK(f->nargs < 0 || f->nargs == num_arguments);
   2731 
   2732   // TODO(1236192): Most runtime routines don't need the number of
   2733   // arguments passed in because it is constant. At some point we
   2734   // should remove this need and make the runtime routine entry code
   2735   // smarter.
   2736   mov(r3, Operand(num_arguments));
   2737   mov(r4, Operand(ExternalReference(f, isolate())));
   2738   CEntryStub stub(isolate(),
   2739 #if V8_TARGET_ARCH_PPC64
   2740                   f->result_size,
   2741 #else
   2742                   1,
   2743 #endif
   2744                   save_doubles);
   2745   CallStub(&stub);
   2746 }
   2747 
   2748 
   2749 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
   2750                                            int num_arguments) {
   2751   mov(r3, Operand(num_arguments));
   2752   mov(r4, Operand(ext));
   2753 
   2754   CEntryStub stub(isolate(), 1);
   2755   CallStub(&stub);
   2756 }
   2757 
   2758 
   2759 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
   2760   const Runtime::Function* function = Runtime::FunctionForId(fid);
   2761   DCHECK_EQ(1, function->result_size);
   2762   if (function->nargs >= 0) {
   2763     mov(r3, Operand(function->nargs));
   2764   }
   2765   JumpToExternalReference(ExternalReference(fid, isolate()));
   2766 }
   2767 
   2768 
   2769 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
   2770   mov(r4, Operand(builtin));
   2771   CEntryStub stub(isolate(), 1);
   2772   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   2773 }
   2774 
   2775 
   2776 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
   2777                                 Register scratch1, Register scratch2) {
   2778   if (FLAG_native_code_counters && counter->Enabled()) {
   2779     mov(scratch1, Operand(value));
   2780     mov(scratch2, Operand(ExternalReference(counter)));
   2781     stw(scratch1, MemOperand(scratch2));
   2782   }
   2783 }
   2784 
   2785 
   2786 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
   2787                                       Register scratch1, Register scratch2) {
   2788   DCHECK(value > 0);
   2789   if (FLAG_native_code_counters && counter->Enabled()) {
   2790     mov(scratch2, Operand(ExternalReference(counter)));
   2791     lwz(scratch1, MemOperand(scratch2));
   2792     addi(scratch1, scratch1, Operand(value));
   2793     stw(scratch1, MemOperand(scratch2));
   2794   }
   2795 }
   2796 
   2797 
   2798 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
   2799                                       Register scratch1, Register scratch2) {
   2800   DCHECK(value > 0);
   2801   if (FLAG_native_code_counters && counter->Enabled()) {
   2802     mov(scratch2, Operand(ExternalReference(counter)));
   2803     lwz(scratch1, MemOperand(scratch2));
   2804     subi(scratch1, scratch1, Operand(value));
   2805     stw(scratch1, MemOperand(scratch2));
   2806   }
   2807 }
   2808 
   2809 
   2810 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
   2811                             CRegister cr) {
   2812   if (emit_debug_code()) Check(cond, reason, cr);
   2813 }
   2814 
   2815 
   2816 void MacroAssembler::AssertFastElements(Register elements) {
   2817   if (emit_debug_code()) {
   2818     DCHECK(!elements.is(r0));
   2819     Label ok;
   2820     push(elements);
   2821     LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
   2822     LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
   2823     cmp(elements, r0);
   2824     beq(&ok);
   2825     LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
   2826     cmp(elements, r0);
   2827     beq(&ok);
   2828     LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
   2829     cmp(elements, r0);
   2830     beq(&ok);
   2831     Abort(kJSObjectWithFastElementsMapHasSlowElements);
   2832     bind(&ok);
   2833     pop(elements);
   2834   }
   2835 }
   2836 
   2837 
   2838 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
   2839   Label L;
   2840   b(cond, &L, cr);
   2841   Abort(reason);
   2842   // will not return here
   2843   bind(&L);
   2844 }
   2845 
   2846 
   2847 void MacroAssembler::Abort(BailoutReason reason) {
   2848   Label abort_start;
   2849   bind(&abort_start);
   2850 #ifdef DEBUG
   2851   const char* msg = GetBailoutReason(reason);
   2852   if (msg != NULL) {
   2853     RecordComment("Abort message: ");
   2854     RecordComment(msg);
   2855   }
   2856 
   2857   if (FLAG_trap_on_abort) {
   2858     stop(msg);
   2859     return;
   2860   }
   2861 #endif
   2862 
   2863   LoadSmiLiteral(r0, Smi::FromInt(reason));
   2864   push(r0);
   2865   // Disable stub call restrictions to always allow calls to abort.
   2866   if (!has_frame_) {
   2867     // We don't actually want to generate a pile of code for this, so just
   2868     // claim there is a stack frame, without generating one.
   2869     FrameScope scope(this, StackFrame::NONE);
   2870     CallRuntime(Runtime::kAbort);
   2871   } else {
   2872     CallRuntime(Runtime::kAbort);
   2873   }
   2874   // will not return here
   2875 }
   2876 
   2877 
   2878 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   2879   if (context_chain_length > 0) {
   2880     // Move up the chain of contexts to the context containing the slot.
   2881     LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   2882     for (int i = 1; i < context_chain_length; i++) {
   2883       LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   2884     }
   2885   } else {
   2886     // Slot is in the current function context.  Move it into the
   2887     // destination register in case we store into it (the write barrier
   2888     // cannot be allowed to destroy the context in cp).
   2889     mr(dst, cp);
   2890   }
   2891 }
   2892 
   2893 
   2894 void MacroAssembler::LoadTransitionedArrayMapConditional(
   2895     ElementsKind expected_kind, ElementsKind transitioned_kind,
   2896     Register map_in_out, Register scratch, Label* no_map_match) {
   2897   DCHECK(IsFastElementsKind(expected_kind));
   2898   DCHECK(IsFastElementsKind(transitioned_kind));
   2899 
   2900   // Check that the function's map is the same as the expected cached map.
   2901   LoadP(scratch, NativeContextMemOperand());
   2902   LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
   2903   cmp(map_in_out, ip);
   2904   bne(no_map_match);
   2905 
   2906   // Use the transitioned cached map.
   2907   LoadP(map_in_out,
   2908         ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
   2909 }
   2910 
   2911 
   2912 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   2913   LoadP(dst, NativeContextMemOperand());
   2914   LoadP(dst, ContextMemOperand(dst, index));
   2915 }
   2916 
   2917 
   2918 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   2919                                                   Register map,
   2920                                                   Register scratch) {
   2921   // Load the initial map. The global functions all have initial maps.
   2922   LoadP(map,
   2923         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2924   if (emit_debug_code()) {
   2925     Label ok, fail;
   2926     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
   2927     b(&ok);
   2928     bind(&fail);
   2929     Abort(kGlobalFunctionsMustHaveInitialMap);
   2930     bind(&ok);
   2931   }
   2932 }
   2933 
   2934 
   2935 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
   2936     Register reg, Register scratch, Label* not_power_of_two_or_zero) {
   2937   subi(scratch, reg, Operand(1));
   2938   cmpi(scratch, Operand::Zero());
   2939   blt(not_power_of_two_or_zero);
   2940   and_(r0, scratch, reg, SetRC);
   2941   bne(not_power_of_two_or_zero, cr0);
   2942 }
   2943 
   2944 
   2945 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
   2946                                                      Register scratch,
   2947                                                      Label* zero_and_neg,
   2948                                                      Label* not_power_of_two) {
   2949   subi(scratch, reg, Operand(1));
   2950   cmpi(scratch, Operand::Zero());
   2951   blt(zero_and_neg);
   2952   and_(r0, scratch, reg, SetRC);
   2953   bne(not_power_of_two, cr0);
   2954 }
   2955 
   2956 #if !V8_TARGET_ARCH_PPC64
   2957 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
   2958   DCHECK(!reg.is(overflow));
   2959   mr(overflow, reg);  // Save original value.
   2960   SmiTag(reg);
   2961   xor_(overflow, overflow, reg, SetRC);  // Overflow if (value ^ 2 * value) < 0.
   2962 }
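        // Worked example of the overflow check: on 32-bit targets SmiTag is a
        // shift left by one, so tagging overflows when the value does not fit
        // in 31 bits.  value ^ (value << 1) has its sign bit set exactly when
        // bits 31 and 30 of the original value differ:
        //   0x3FFFFFFF ^ 0x7FFFFFFE == 0x40000001  (positive, no overflow)
        //   0x40000000 ^ 0x80000000 == 0xC0000000  (negative, overflow)
        // SetRC records the sign in cr0 so callers can branch on it.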
   2963 
   2964 
   2965 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
   2966                                          Register overflow) {
   2967   if (dst.is(src)) {
   2968     // Fall back to slower case.
   2969     SmiTagCheckOverflow(dst, overflow);
   2970   } else {
   2971     DCHECK(!dst.is(src));
   2972     DCHECK(!dst.is(overflow));
   2973     DCHECK(!src.is(overflow));
   2974     SmiTag(dst, src);
   2975     xor_(overflow, dst, src, SetRC);  // Overflow if (value ^ 2 * value) < 0.
   2976   }
   2977 }
   2978 #endif
   2979 
   2980 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
   2981                                       Label* on_not_both_smi) {
   2982   STATIC_ASSERT(kSmiTag == 0);
   2983   orx(r0, reg1, reg2, LeaveRC);
   2984   JumpIfNotSmi(r0, on_not_both_smi);
   2985 }
   2986 
   2987 
   2988 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
   2989                                        Label* smi_case) {
   2990   STATIC_ASSERT(kSmiTag == 0);
   2991   TestBitRange(src, kSmiTagSize - 1, 0, r0);
   2992   SmiUntag(dst, src);
   2993   beq(smi_case, cr0);
   2994 }
   2995 
   2996 
   2997 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
   2998                                           Label* non_smi_case) {
   2999   STATIC_ASSERT(kSmiTag == 0);
   3000   TestBitRange(src, kSmiTagSize - 1, 0, r0);
   3001   SmiUntag(dst, src);
   3002   bne(non_smi_case, cr0);
   3003 }
   3004 
   3005 
   3006 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
   3007                                      Label* on_either_smi) {
   3008   STATIC_ASSERT(kSmiTag == 0);
   3009   JumpIfSmi(reg1, on_either_smi);
   3010   JumpIfSmi(reg2, on_either_smi);
   3011 }
   3012 
   3013 void MacroAssembler::AssertNotNumber(Register object) {
   3014   if (emit_debug_code()) {
   3015     STATIC_ASSERT(kSmiTag == 0);
   3016     TestIfSmi(object, r0);
   3017     Check(ne, kOperandIsANumber, cr0);
   3018     push(object);
   3019     CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
   3020     pop(object);
   3021     Check(ne, kOperandIsANumber);
   3022   }
   3023 }
   3024 
   3025 void MacroAssembler::AssertNotSmi(Register object) {
   3026   if (emit_debug_code()) {
   3027     STATIC_ASSERT(kSmiTag == 0);
   3028     TestIfSmi(object, r0);
   3029     Check(ne, kOperandIsASmi, cr0);
   3030   }
   3031 }
   3032 
   3033 
   3034 void MacroAssembler::AssertSmi(Register object) {
   3035   if (emit_debug_code()) {
   3036     STATIC_ASSERT(kSmiTag == 0);
   3037     TestIfSmi(object, r0);
   3038     Check(eq, kOperandIsNotSmi, cr0);
   3039   }
   3040 }
   3041 
   3042 
   3043 void MacroAssembler::AssertString(Register object) {
   3044   if (emit_debug_code()) {
   3045     STATIC_ASSERT(kSmiTag == 0);
   3046     TestIfSmi(object, r0);
   3047     Check(ne, kOperandIsASmiAndNotAString, cr0);
   3048     push(object);
   3049     LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
   3050     CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
   3051     pop(object);
   3052     Check(lt, kOperandIsNotAString);
   3053   }
   3054 }
   3055 
   3056 
   3057 void MacroAssembler::AssertName(Register object) {
   3058   if (emit_debug_code()) {
   3059     STATIC_ASSERT(kSmiTag == 0);
   3060     TestIfSmi(object, r0);
   3061     Check(ne, kOperandIsASmiAndNotAName, cr0);
   3062     push(object);
   3063     LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
   3064     CompareInstanceType(object, object, LAST_NAME_TYPE);
   3065     pop(object);
   3066     Check(le, kOperandIsNotAName);
   3067   }
   3068 }
   3069 
   3070 
   3071 void MacroAssembler::AssertFunction(Register object) {
   3072   if (emit_debug_code()) {
   3073     STATIC_ASSERT(kSmiTag == 0);
   3074     TestIfSmi(object, r0);
   3075     Check(ne, kOperandIsASmiAndNotAFunction, cr0);
   3076     push(object);
   3077     CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
   3078     pop(object);
   3079     Check(eq, kOperandIsNotAFunction);
   3080   }
   3081 }
   3082 
   3083 
   3084 void MacroAssembler::AssertBoundFunction(Register object) {
   3085   if (emit_debug_code()) {
   3086     STATIC_ASSERT(kSmiTag == 0);
   3087     TestIfSmi(object, r0);
   3088     Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
   3089     push(object);
   3090     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
   3091     pop(object);
   3092     Check(eq, kOperandIsNotABoundFunction);
   3093   }
   3094 }
   3095 
   3096 void MacroAssembler::AssertGeneratorObject(Register object) {
   3097   if (emit_debug_code()) {
   3098     STATIC_ASSERT(kSmiTag == 0);
   3099     TestIfSmi(object, r0);
   3100     Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
   3101     push(object);
   3102     CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
   3103     pop(object);
   3104     Check(eq, kOperandIsNotAGeneratorObject);
   3105   }
   3106 }
   3107 
   3108 void MacroAssembler::AssertReceiver(Register object) {
   3109   if (emit_debug_code()) {
   3110     STATIC_ASSERT(kSmiTag == 0);
   3111     TestIfSmi(object, r0);
   3112     Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
   3113     push(object);
   3114     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   3115     CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
   3116     pop(object);
   3117     Check(ge, kOperandIsNotAReceiver);
   3118   }
   3119 }
   3120 
   3121 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
   3122                                                      Register scratch) {
   3123   if (emit_debug_code()) {
   3124     Label done_checking;
   3125     AssertNotSmi(object);
   3126     CompareRoot(object, Heap::kUndefinedValueRootIndex);
   3127     beq(&done_checking);
   3128     LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   3129     CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
   3130     Assert(eq, kExpectedUndefinedOrCell);
   3131     bind(&done_checking);
   3132   }
   3133 }
   3134 
   3135 
   3136 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
   3137   if (emit_debug_code()) {
   3138     CompareRoot(reg, index);
   3139     Check(eq, kHeapNumberMapRegisterClobbered);
   3140   }
   3141 }
   3142 
   3143 
   3144 void MacroAssembler::JumpIfNotHeapNumber(Register object,
   3145                                          Register heap_number_map,
   3146                                          Register scratch,
   3147                                          Label* on_not_heap_number) {
   3148   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   3149   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   3150   cmp(scratch, heap_number_map);
   3151   bne(on_not_heap_number);
   3152 }
   3153 
   3154 
   3155 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
   3156     Register first, Register second, Register scratch1, Register scratch2,
   3157     Label* failure) {
   3158   // Test that both first and second are sequential one-byte strings.
   3159   // Assume that they are non-smis.
   3160   LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   3161   LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   3162   lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   3163   lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
   3164 
   3165   JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
   3166                                                  scratch2, failure);
   3167 }
   3168 
   3169 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
   3170                                                            Register second,
   3171                                                            Register scratch1,
   3172                                                            Register scratch2,
   3173                                                            Label* failure) {
   3174   // Check that neither is a smi.
   3175   and_(scratch1, first, second);
   3176   JumpIfSmi(scratch1, failure);
   3177   JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
   3178                                                scratch2, failure);
   3179 }
   3180 
   3181 
   3182 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
   3183                                                      Label* not_unique_name) {
   3184   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   3185   Label succeed;
   3186   andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   3187   beq(&succeed, cr0);
   3188   cmpi(reg, Operand(SYMBOL_TYPE));
   3189   bne(not_unique_name);
   3190 
   3191   bind(&succeed);
   3192 }
   3193 
   3194 
   3195 // Allocates a heap number or jumps to the gc_required label if the young
   3196 // space is full and a scavenge is needed.
   3197 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
   3198                                         Register scratch2,
   3199                                         Register heap_number_map,
   3200                                         Label* gc_required,
   3201                                         MutableMode mode) {
   3202   // Allocate an object in the heap for the heap number and tag it as a heap
   3203   // object.
   3204   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
   3205            NO_ALLOCATION_FLAGS);
   3206 
   3207   Heap::RootListIndex map_index = mode == MUTABLE
   3208                                       ? Heap::kMutableHeapNumberMapRootIndex
   3209                                       : Heap::kHeapNumberMapRootIndex;
   3210   AssertIsRoot(heap_number_map, map_index);
   3211 
   3212   // Store heap number map in the allocated object.
   3213   StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
   3214         r0);
   3215 }
   3216 
   3217 
   3218 void MacroAssembler::AllocateHeapNumberWithValue(
   3219     Register result, DoubleRegister value, Register scratch1, Register scratch2,
   3220     Register heap_number_map, Label* gc_required) {
   3221   AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
   3222   stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
   3223 }
   3224 
   3225 
   3226 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
   3227                                      Register value, Register scratch1,
   3228                                      Register scratch2, Label* gc_required) {
   3229   DCHECK(!result.is(constructor));
   3230   DCHECK(!result.is(scratch1));
   3231   DCHECK(!result.is(scratch2));
   3232   DCHECK(!result.is(value));
   3233 
   3234   // Allocate JSValue in new space.
   3235   Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
   3236            NO_ALLOCATION_FLAGS);
   3237 
   3238   // Initialize the JSValue.
   3239   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
   3240   StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
   3241   LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
   3242   StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
   3243   StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
   3244   StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
   3245   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
   3246 }
   3247 
   3248 
   3249 void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
   3250                                Register scratch) {
   3251   Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
   3252 
   3253   DCHECK(!scratch.is(r0));
   3254 
   3255   cmpi(length, Operand::Zero());
   3256   beq(&done);
   3257 
   3258   // Check src alignment and length to see whether word_loop is possible
   3259   andi(scratch, src, Operand(kPointerSize - 1));
   3260   beq(&aligned, cr0);
   3261   subfic(scratch, scratch, Operand(kPointerSize * 2));
   3262   cmp(length, scratch);
   3263   blt(&byte_loop);
   3264 
   3265   // Align src before copying in word size chunks.
   3266   subi(scratch, scratch, Operand(kPointerSize));
   3267   mtctr(scratch);
   3268   bind(&align_loop);
   3269   lbz(scratch, MemOperand(src));
   3270   addi(src, src, Operand(1));
   3271   subi(length, length, Operand(1));
   3272   stb(scratch, MemOperand(dst));
   3273   addi(dst, dst, Operand(1));
   3274   bdnz(&align_loop);
   3275 
   3276   bind(&aligned);
   3277 
   3278   // Copy bytes in word size chunks.
   3279   if (emit_debug_code()) {
   3280     andi(r0, src, Operand(kPointerSize - 1));
   3281     Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
   3282   }
   3283 
   3284   ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
   3285   cmpi(scratch, Operand::Zero());
   3286   beq(&byte_loop);
   3287 
   3288   mtctr(scratch);
   3289   bind(&word_loop);
   3290   LoadP(scratch, MemOperand(src));
   3291   addi(src, src, Operand(kPointerSize));
   3292   subi(length, length, Operand(kPointerSize));
   3293   if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
   3294     // Currently false for PPC, but a possible future optimization.
   3295     StoreP(scratch, MemOperand(dst));
   3296     addi(dst, dst, Operand(kPointerSize));
   3297   } else {
   3298 #if V8_TARGET_LITTLE_ENDIAN
   3299     stb(scratch, MemOperand(dst, 0));
   3300     ShiftRightImm(scratch, scratch, Operand(8));
   3301     stb(scratch, MemOperand(dst, 1));
   3302     ShiftRightImm(scratch, scratch, Operand(8));
   3303     stb(scratch, MemOperand(dst, 2));
   3304     ShiftRightImm(scratch, scratch, Operand(8));
   3305     stb(scratch, MemOperand(dst, 3));
   3306 #if V8_TARGET_ARCH_PPC64
   3307     ShiftRightImm(scratch, scratch, Operand(8));
   3308     stb(scratch, MemOperand(dst, 4));
   3309     ShiftRightImm(scratch, scratch, Operand(8));
   3310     stb(scratch, MemOperand(dst, 5));
   3311     ShiftRightImm(scratch, scratch, Operand(8));
   3312     stb(scratch, MemOperand(dst, 6));
   3313     ShiftRightImm(scratch, scratch, Operand(8));
   3314     stb(scratch, MemOperand(dst, 7));
   3315 #endif
   3316 #else
   3317 #if V8_TARGET_ARCH_PPC64
   3318     stb(scratch, MemOperand(dst, 7));
   3319     ShiftRightImm(scratch, scratch, Operand(8));
   3320     stb(scratch, MemOperand(dst, 6));
   3321     ShiftRightImm(scratch, scratch, Operand(8));
   3322     stb(scratch, MemOperand(dst, 5));
   3323     ShiftRightImm(scratch, scratch, Operand(8));
   3324     stb(scratch, MemOperand(dst, 4));
   3325     ShiftRightImm(scratch, scratch, Operand(8));
   3326 #endif
   3327     stb(scratch, MemOperand(dst, 3));
   3328     ShiftRightImm(scratch, scratch, Operand(8));
   3329     stb(scratch, MemOperand(dst, 2));
   3330     ShiftRightImm(scratch, scratch, Operand(8));
   3331     stb(scratch, MemOperand(dst, 1));
   3332     ShiftRightImm(scratch, scratch, Operand(8));
   3333     stb(scratch, MemOperand(dst, 0));
   3334 #endif
   3335     addi(dst, dst, Operand(kPointerSize));
   3336   }
   3337   bdnz(&word_loop);
   3338 
   3339   // Copy the last bytes, if any are left.
   3340   cmpi(length, Operand::Zero());
   3341   beq(&done);
   3342 
   3343   bind(&byte_loop);
   3344   mtctr(length);
   3345   bind(&byte_loop_1);
   3346   lbz(scratch, MemOperand(src));
   3347   addi(src, src, Operand(1));
   3348   stb(scratch, MemOperand(dst));
   3349   addi(dst, dst, Operand(1));
   3350   bdnz(&byte_loop_1);
   3351 
   3352   bind(&done);
   3353 }
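        // Note on the fallback path above (no UNALIGNED_ACCESSES): the word
        // loaded from src is written back one byte at a time, and the
        // shift/stb sequence places each byte at the offset a native
        // word-sized store would use for the target endianness, so the
        // resulting memory layout is identical.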
   3354 
   3355 
   3356 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
   3357                                                  Register count,
   3358                                                  Register filler) {
   3359   Label loop;
   3360   mtctr(count);
   3361   bind(&loop);
   3362   StoreP(filler, MemOperand(current_address));
   3363   addi(current_address, current_address, Operand(kPointerSize));
   3364   bdnz(&loop);
   3365 }
   3366 
   3367 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
   3368                                                 Register end_address,
   3369                                                 Register filler) {
   3370   Label done;
   3371   sub(r0, end_address, current_address, LeaveOE, SetRC);
   3372   beq(&done, cr0);
   3373   ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
   3374   InitializeNFieldsWithFiller(current_address, r0, filler);
   3375   bind(&done);
   3376 }
   3377 
   3378 
   3379 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
   3380     Register first, Register second, Register scratch1, Register scratch2,
   3381     Label* failure) {
   3382   const int kFlatOneByteStringMask =
   3383       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   3384   const int kFlatOneByteStringTag =
   3385       kStringTag | kOneByteStringTag | kSeqStringTag;
   3386   andi(scratch1, first, Operand(kFlatOneByteStringMask));
   3387   andi(scratch2, second, Operand(kFlatOneByteStringMask));
   3388   cmpi(scratch1, Operand(kFlatOneByteStringTag));
   3389   bne(failure);
   3390   cmpi(scratch2, Operand(kFlatOneByteStringTag));
   3391   bne(failure);
   3392 }
   3393 
   3394 
   3395 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
   3396                                                               Register scratch,
   3397                                                               Label* failure) {
   3398   const int kFlatOneByteStringMask =
   3399       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   3400   const int kFlatOneByteStringTag =
   3401       kStringTag | kOneByteStringTag | kSeqStringTag;
   3402   andi(scratch, type, Operand(kFlatOneByteStringMask));
   3403   cmpi(scratch, Operand(kFlatOneByteStringTag));
   3404   bne(failure);
   3405 }
   3406 
   3407 static const int kRegisterPassedArguments = 8;
   3408 
   3409 
   3410 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
   3411                                               int num_double_arguments) {
   3412   int stack_passed_words = 0;
   3413   if (num_double_arguments > DoubleRegister::kNumRegisters) {
   3414     stack_passed_words +=
   3415         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
   3416   }
   3417   // Up to 8 simple arguments are passed in registers r3..r10.
   3418   if (num_reg_arguments > kRegisterPassedArguments) {
   3419     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
   3420   }
   3421   return stack_passed_words;
   3422 }
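        // Worked example: with num_reg_arguments == 10 and
        // num_double_arguments == 2, the doubles all fit in registers, the
        // first 8 integer arguments go in r3..r10, and the remaining 2 spill
        // to the stack, so 2 stack-passed words are returned.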
   3423 
   3424 
   3425 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
   3426                                                Register value,
   3427                                                uint32_t encoding_mask) {
   3428   Label is_object;
   3429   TestIfSmi(string, r0);
   3430   Check(ne, kNonObject, cr0);
   3431 
   3432   LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
   3433   lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
   3434 
   3435   andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
   3436   cmpi(ip, Operand(encoding_mask));
   3437   Check(eq, kUnexpectedStringType);
   3438 
   3439 // The index is assumed to come in untagged. Tag it so it can be compared
   3440 // with the string length without using a temp register; it is untagged again
   3441 // at the end of this function.
   3442 #if !V8_TARGET_ARCH_PPC64
   3443   Label index_tag_ok, index_tag_bad;
   3444   JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
   3445 #endif
   3446   SmiTag(index, index);
   3447 #if !V8_TARGET_ARCH_PPC64
   3448   b(&index_tag_ok);
   3449   bind(&index_tag_bad);
   3450   Abort(kIndexIsTooLarge);
   3451   bind(&index_tag_ok);
   3452 #endif
   3453 
   3454   LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
   3455   cmp(index, ip);
   3456   Check(lt, kIndexIsTooLarge);
   3457 
   3458   DCHECK(Smi::FromInt(0) == 0);
   3459   cmpi(index, Operand::Zero());
   3460   Check(ge, kIndexIsNegative);
   3461 
   3462   SmiUntag(index, index);
   3463 }
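        // Note: String::kLengthOffset holds the length as a Smi, which is why
        // the untagged index is temporarily Smi-tagged above so that both cmp
        // operands are in the same representation, and untagged again before
        // returning.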
   3464 
   3465 
   3466 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
   3467                                           int num_double_arguments,
   3468                                           Register scratch) {
   3469   int frame_alignment = ActivationFrameAlignment();
   3470   int stack_passed_arguments =
   3471       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
   3472   int stack_space = kNumRequiredStackFrameSlots;
   3473 
   3474   if (frame_alignment > kPointerSize) {
   3475     // Align the stack and make room for stack arguments,
   3476     // preserving the original value of sp.
   3477     mr(scratch, sp);
   3478     addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
   3479     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   3480     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
   3481     StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
   3482   } else {
   3483     // Make room for stack arguments
   3484     stack_space += stack_passed_arguments;
   3485   }
   3486 
   3487   // Allocate frame with required slots to make ABI work.
   3488   li(r0, Operand::Zero());
   3489   StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
   3490 }
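        // Note: kNumRequiredStackFrameSlots covers the ABI-mandated linkage
        // area.  When the activation frame alignment exceeds the pointer size,
        // the original sp is saved above the outgoing arguments so that
        // CallCFunctionHelper can restore it with a single load; otherwise the
        // argument words are simply added to the fixed slot count and sp is
        // restored with an addi after the call.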
   3491 
   3492 
   3493 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
   3494                                           Register scratch) {
   3495   PrepareCallCFunction(num_reg_arguments, 0, scratch);
   3496 }
   3497 
   3498 
   3499 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
   3500 
   3501 
   3502 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
   3503 
   3504 
   3505 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
   3506                                           DoubleRegister src2) {
   3507   if (src2.is(d1)) {
   3508     DCHECK(!src1.is(d2));
   3509     Move(d2, src2);
   3510     Move(d1, src1);
   3511   } else {
   3512     Move(d1, src1);
   3513     Move(d2, src2);
   3514   }
   3515 }
   3516 
   3517 
   3518 void MacroAssembler::CallCFunction(ExternalReference function,
   3519                                    int num_reg_arguments,
   3520                                    int num_double_arguments) {
   3521   mov(ip, Operand(function));
   3522   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
   3523 }
   3524 
   3525 
   3526 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
   3527                                    int num_double_arguments) {
   3528   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
   3529 }
   3530 
   3531 
   3532 void MacroAssembler::CallCFunction(ExternalReference function,
   3533                                    int num_arguments) {
   3534   CallCFunction(function, num_arguments, 0);
   3535 }
   3536 
   3537 
   3538 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   3539   CallCFunction(function, num_arguments, 0);
   3540 }
   3541 
   3542 
   3543 void MacroAssembler::CallCFunctionHelper(Register function,
   3544                                          int num_reg_arguments,
   3545                                          int num_double_arguments) {
   3546   DCHECK(has_frame());
   3547 
   3548   // Just call directly. The function called cannot cause a GC, or
   3549   // allow preemption, so the return address in the link register
   3550   // stays correct.
   3551   Register dest = function;
   3552   if (ABI_USES_FUNCTION_DESCRIPTORS) {
   3553     // AIX/PPC64BE Linux uses a function descriptor. When calling C code,
   3554     // load the entry point and TOC pointer from the descriptor.
   3555     LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
   3556     LoadP(ip, MemOperand(function, 0));
   3557     dest = ip;
   3558   } else if (ABI_CALL_VIA_IP) {
   3559     Move(ip, function);
   3560     dest = ip;
   3561   }
   3562 
   3563   Call(dest);
   3564 
   3565   // Remove the frame created in PrepareCallCFunction.
   3566   int stack_passed_arguments =
   3567       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
   3568   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
   3569   if (ActivationFrameAlignment() > kPointerSize) {
   3570     LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
   3571   } else {
   3572     addi(sp, sp, Operand(stack_space * kPointerSize));
   3573   }
   3574 }
   3575 
   3576 
   3577 void MacroAssembler::DecodeConstantPoolOffset(Register result,
   3578                                               Register location) {
   3579   Label overflow_access, done;
   3580   DCHECK(!AreAliased(result, location, r0));
   3581 
   3582   // Determine constant pool access type
   3583   // Caller has already placed the instruction word at location in result.
   3584   ExtractBitRange(r0, result, 31, 26);
   3585   cmpi(r0, Operand(ADDIS >> 26));
   3586   beq(&overflow_access);
   3587 
   3588   // Regular constant pool access
   3589   // extract the load offset
   3590   andi(result, result, Operand(kImm16Mask));
   3591   b(&done);
   3592 
   3593   bind(&overflow_access);
   3594   // Overflow constant pool access
   3595   // shift addis immediate
   3596   slwi(r0, result, Operand(16));
   3597   // sign-extend and add the load offset
   3598   lwz(result, MemOperand(location, kInstrSize));
   3599   extsh(result, result);
   3600   add(result, r0, result);
   3601 
   3602   bind(&done);
   3603 }
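        // Worked example: a regular access is a single load whose 16-bit
        // displacement is the pool offset.  An overflow access is an
        // addis/load pair; with an addis immediate of 0x0002 and a load
        // displacement of 0x0010, the decoded offset is
        // (0x0002 << 16) + 0x0010 == 0x20010.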
   3604 
   3605 
   3606 void MacroAssembler::CheckPageFlag(
   3607     Register object,
   3608     Register scratch,  // scratch may be same register as object
   3609     int mask, Condition cc, Label* condition_met) {
   3610   DCHECK(cc == ne || cc == eq);
   3611   ClearRightImm(scratch, object, Operand(kPageSizeBits));
   3612   LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   3613 
   3614   And(r0, scratch, Operand(mask), SetRC);
   3615 
   3616   if (cc == ne) {
   3617     bne(condition_met, cr0);
   3618   }
   3619   if (cc == eq) {
   3620     beq(condition_met, cr0);
   3621   }
   3622 }
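        // Note: ClearRightImm masks off the low kPageSizeBits bits of the
        // address, yielding the MemoryChunk (page) start from which the flag
        // word is loaded.  Assuming kPageSizeBits == 19, an object at
        // 0x12345678 maps to the chunk header at 0x12300000.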
   3623 
   3624 
   3625 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
   3626                                  Register scratch1, Label* on_black) {
   3627   HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
   3628   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
   3629 }
   3630 
   3631 
   3632 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
   3633                               Register mask_scratch, Label* has_color,
   3634                               int first_bit, int second_bit) {
   3635   DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
   3636 
   3637   GetMarkBits(object, bitmap_scratch, mask_scratch);
   3638 
   3639   Label other_color, word_boundary;
   3640   lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
   3641   // Test the first bit
   3642   and_(r0, ip, mask_scratch, SetRC);
   3643   b(first_bit == 1 ? eq : ne, &other_color, cr0);
   3644   // Shift left 1
   3645   // May need to load the next cell
   3646   slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
   3647   beq(&word_boundary, cr0);
   3648   // Test the second bit
   3649   and_(r0, ip, mask_scratch, SetRC);
   3650   b(second_bit == 1 ? ne : eq, has_color, cr0);
   3651   b(&other_color);
   3652 
   3653   bind(&word_boundary);
   3654   lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
   3655   andi(r0, ip, Operand(1));
   3656   b(second_bit == 1 ? ne : eq, has_color, cr0);
   3657   bind(&other_color);
   3658 }
   3659 
   3660 
   3661 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
   3662                                  Register mask_reg) {
   3663   DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
   3664   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
   3665   lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
   3666   and_(bitmap_reg, addr_reg, r0);
   3667   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
   3668   ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
   3669   ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
   3670   ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
   3671   add(bitmap_reg, bitmap_reg, ip);
   3672   li(ip, Operand(1));
   3673   slw(mask_reg, ip, mask_reg);
   3674 }
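        // Worked example, assuming a 64-bit build with kPointerSizeLog2 == 3
        // and Bitmap::kBitsPerCellLog2 == 5 (so kLowBits == 8): for an object
        // at page offset 0x1234, the bit index within a cell is
        // (0x1234 >> 3) & 31 == 6 (mask_reg == 1 << 6) and the cell index is
        // 0x1234 >> 8 == 18, giving a byte offset of 18 * 4 into the bitmap.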
   3675 
   3676 
   3677 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
   3678                                  Register mask_scratch, Register load_scratch,
   3679                                  Label* value_is_white) {
   3680   DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
   3681   GetMarkBits(value, bitmap_scratch, mask_scratch);
   3682 
   3683   // If the value is black or grey we don't need to do anything.
   3684   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   3685   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
   3686   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
   3687   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
   3688 
   3689   // Since both black and grey have a 1 in the first position and white does
   3690   // not have a 1 there, we only need to check one bit.
   3691   lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
   3692   and_(r0, mask_scratch, load_scratch, SetRC);
   3693   beq(value_is_white, cr0);
   3694 }
   3695 
   3696 
   3697 // Saturate a value into 8-bit unsigned integer
   3698 //   if input_value < 0, output_value is 0
   3699 //   if input_value > 255, output_value is 255
   3700 //   otherwise output_value is the input_value
   3701 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
   3702   int satval = (1 << 8) - 1;
   3703 
   3704   if (CpuFeatures::IsSupported(ISELECT)) {
   3705     // set to 0 if negative
   3706     cmpi(input_reg, Operand::Zero());
   3707     isel(lt, output_reg, r0, input_reg);
   3708 
   3709     // set to satval if > satval
   3710     li(r0, Operand(satval));
   3711     cmpi(output_reg, Operand(satval));
   3712     isel(lt, output_reg, output_reg, r0);
   3713   } else {
   3714     Label done, negative_label, overflow_label;
   3715     cmpi(input_reg, Operand::Zero());
   3716     blt(&negative_label);
   3717 
   3718     cmpi(input_reg, Operand(satval));
   3719     bgt(&overflow_label);
   3720     if (!output_reg.is(input_reg)) {
   3721       mr(output_reg, input_reg);
   3722     }
   3723     b(&done);
   3724 
   3725     bind(&negative_label);
   3726     li(output_reg, Operand::Zero());  // set to 0 if negative
   3727     b(&done);
   3728 
   3729     bind(&overflow_label);  // set to satval if > satval
   3730     li(output_reg, Operand(satval));
   3731 
   3732     bind(&done);
   3733   }
   3734 }
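        // Saturation examples for either path above: an input of -5 clamps to
        // 0, 100 stays 100, and 300 clamps to 255.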
   3735 
   3736 
   3737 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
   3738 
   3739 
   3740 void MacroAssembler::ResetRoundingMode() {
   3741   mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
   3742 }
   3743 
   3744 
   3745 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
   3746                                         DoubleRegister input_reg,
   3747                                         DoubleRegister double_scratch) {
   3748   Label above_zero;
   3749   Label done;
   3750   Label in_bounds;
   3751 
   3752   LoadDoubleLiteral(double_scratch, 0.0, result_reg);
   3753   fcmpu(input_reg, double_scratch);
   3754   bgt(&above_zero);
   3755 
   3756   // Double value is not greater than zero or is NaN; return 0.
   3757   LoadIntLiteral(result_reg, 0);
   3758   b(&done);
   3759 
   3760   // Double value is greater than zero; return 255 if it exceeds 255.
   3761   bind(&above_zero);
   3762   LoadDoubleLiteral(double_scratch, 255.0, result_reg);
   3763   fcmpu(input_reg, double_scratch);
   3764   ble(&in_bounds);
   3765   LoadIntLiteral(result_reg, 255);
   3766   b(&done);
   3767 
   3768   // In 0-255 range, round and truncate.
   3769   bind(&in_bounds);
   3770 
   3771   // round to nearest (default rounding mode)
   3772   fctiw(double_scratch, input_reg);
   3773   MovDoubleLowToInt(result_reg, double_scratch);
   3774   bind(&done);
   3775 }
   3776 
   3777 
   3778 void MacroAssembler::LoadInstanceDescriptors(Register map,
   3779                                              Register descriptors) {
   3780   LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
   3781 }
   3782 
   3783 
   3784 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
   3785   lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
   3786   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
   3787 }
   3788 
   3789 
   3790 void MacroAssembler::EnumLength(Register dst, Register map) {
   3791   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
   3792   lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
   3793   ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
   3794   SmiTag(dst);
   3795 }
   3796 
   3797 
   3798 void MacroAssembler::LoadAccessor(Register dst, Register holder,
   3799                                   int accessor_index,
   3800                                   AccessorComponent accessor) {
   3801   LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
   3802   LoadInstanceDescriptors(dst, dst);
   3803   LoadP(dst,
   3804         FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
   3805   const int getterOffset = AccessorPair::kGetterOffset;
   3806   const int setterOffset = AccessorPair::kSetterOffset;
   3807   int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
   3808   LoadP(dst, FieldMemOperand(dst, offset));
   3809 }
   3810 
   3811 
   3812 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
   3813   Register null_value = r8;
   3814   Register empty_fixed_array_value = r9;
   3815   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   3816   Label next, start;
   3817   mr(r5, r3);
   3818 
   3819   // Check if the enum length field is properly initialized, indicating that
   3820   // there is an enum cache.
   3821   LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
   3822 
   3823   EnumLength(r6, r4);
   3824   CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
   3825   beq(call_runtime);
   3826 
   3827   LoadRoot(null_value, Heap::kNullValueRootIndex);
   3828   b(&start);
   3829 
   3830   bind(&next);
   3831   LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
   3832 
   3833   // For all objects but the receiver, check that the cache is empty.
   3834   EnumLength(r6, r4);
   3835   CmpSmiLiteral(r6, Smi::FromInt(0), r0);
   3836   bne(call_runtime);
   3837 
   3838   bind(&start);
   3839 
   3840   // Check that there are no elements. Register r5 contains the current JS
   3841   // object we've reached through the prototype chain.
   3842   Label no_elements;
   3843   LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
   3844   cmp(r5, empty_fixed_array_value);
   3845   beq(&no_elements);
   3846 
   3847   // Second chance, the object may be using the empty slow element dictionary.
   3848   CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
   3849   bne(call_runtime);
   3850 
   3851   bind(&no_elements);
   3852   LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
   3853   cmp(r5, null_value);
   3854   bne(&next);
   3855 }
   3856 
   3857 
   3858 ////////////////////////////////////////////////////////////////////////////////
   3859 //
   3860 // New MacroAssembler Interfaces added for PPC
   3861 //
   3862 ////////////////////////////////////////////////////////////////////////////////
   3863 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
   3864   mov(dst, Operand(value));
   3865 }
   3866 
   3867 
   3868 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
   3869   mov(dst, Operand(smi));
   3870 }
   3871 
   3872 
   3873 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
   3874                                        Register scratch) {
   3875   if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
   3876       !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
   3877     ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
   3878     if (access == ConstantPoolEntry::OVERFLOWED) {
   3879       addis(scratch, kConstantPoolRegister, Operand::Zero());
   3880       lfd(result, MemOperand(scratch, 0));
   3881     } else {
   3882       lfd(result, MemOperand(kConstantPoolRegister, 0));
   3883     }
   3884     return;
   3885   }
   3886 
   3887   // Avoid a gcc strict-aliasing error by using a union cast.
   3888   union {
   3889     double dval;
   3890 #if V8_TARGET_ARCH_PPC64
   3891     intptr_t ival;
   3892 #else
   3893     intptr_t ival[2];
   3894 #endif
   3895   } litVal;
   3896 
   3897   litVal.dval = value;
   3898 
   3899 #if V8_TARGET_ARCH_PPC64
   3900   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   3901     mov(scratch, Operand(litVal.ival));
   3902     mtfprd(result, scratch);
   3903     return;
   3904   }
   3905 #endif
   3906 
   3907   addi(sp, sp, Operand(-kDoubleSize));
   3908 #if V8_TARGET_ARCH_PPC64
   3909   mov(scratch, Operand(litVal.ival));
   3910   std(scratch, MemOperand(sp));
   3911 #else
   3912   LoadIntLiteral(scratch, litVal.ival[0]);
   3913   stw(scratch, MemOperand(sp, 0));
   3914   LoadIntLiteral(scratch, litVal.ival[1]);
   3915   stw(scratch, MemOperand(sp, 4));
   3916 #endif
   3917   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   3918   lfd(result, MemOperand(sp, 0));
   3919   addi(sp, sp, Operand(kDoubleSize));
   3920 }
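        // Note: the union cast simply reinterprets the IEEE-754 bit pattern;
        // for example, value == 1.0 gives an ival of 0x3FF0000000000000 on a
        // 64-bit build, which is either moved directly to the FPR
        // (FPR_GPR_MOV) or spilled to the stack and reloaded with lfd.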
   3921 
   3922 
   3923 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
   3924                                     Register scratch) {
   3925 // sign-extend src to 64-bit
   3926 #if V8_TARGET_ARCH_PPC64
   3927   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   3928     mtfprwa(dst, src);
   3929     return;
   3930   }
   3931 #endif
   3932 
   3933   DCHECK(!src.is(scratch));
   3934   subi(sp, sp, Operand(kDoubleSize));
   3935 #if V8_TARGET_ARCH_PPC64
   3936   extsw(scratch, src);
   3937   std(scratch, MemOperand(sp, 0));
   3938 #else
   3939   srawi(scratch, src, 31);
   3940   stw(scratch, MemOperand(sp, Register::kExponentOffset));
   3941   stw(src, MemOperand(sp, Register::kMantissaOffset));
   3942 #endif
   3943   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   3944   lfd(dst, MemOperand(sp, 0));
   3945   addi(sp, sp, Operand(kDoubleSize));
   3946 }
   3947 
   3948 
   3949 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
   3950                                             Register scratch) {
   3951 // zero-extend src to 64-bit
   3952 #if V8_TARGET_ARCH_PPC64
   3953   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   3954     mtfprwz(dst, src);
   3955     return;
   3956   }
   3957 #endif
   3958 
   3959   DCHECK(!src.is(scratch));
   3960   subi(sp, sp, Operand(kDoubleSize));
   3961 #if V8_TARGET_ARCH_PPC64
   3962   clrldi(scratch, src, Operand(32));
   3963   std(scratch, MemOperand(sp, 0));
   3964 #else
   3965   li(scratch, Operand::Zero());
   3966   stw(scratch, MemOperand(sp, Register::kExponentOffset));
   3967   stw(src, MemOperand(sp, Register::kMantissaOffset));
   3968 #endif
   3969   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   3970   lfd(dst, MemOperand(sp, 0));
   3971   addi(sp, sp, Operand(kDoubleSize));
   3972 }
   3973 
   3974 
   3975 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
   3976 #if !V8_TARGET_ARCH_PPC64
   3977                                       Register src_hi,
   3978 #endif
   3979                                       Register src) {
   3980 #if V8_TARGET_ARCH_PPC64
   3981   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   3982     mtfprd(dst, src);
   3983     return;
   3984   }
   3985 #endif
   3986 
   3987   subi(sp, sp, Operand(kDoubleSize));
   3988 #if V8_TARGET_ARCH_PPC64
   3989   std(src, MemOperand(sp, 0));
   3990 #else
   3991   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
   3992   stw(src, MemOperand(sp, Register::kMantissaOffset));
   3993 #endif
   3994   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   3995   lfd(dst, MemOperand(sp, 0));
   3996   addi(sp, sp, Operand(kDoubleSize));
   3997 }
   3998 
   3999 
   4000 #if V8_TARGET_ARCH_PPC64
   4001 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
   4002                                                 Register src_hi,
   4003                                                 Register src_lo,
   4004                                                 Register scratch) {
   4005   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   4006     sldi(scratch, src_hi, Operand(32));
   4007     rldimi(scratch, src_lo, 0, 32);
   4008     mtfprd(dst, scratch);
   4009     return;
   4010   }
   4011 
   4012   subi(sp, sp, Operand(kDoubleSize));
   4013   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
   4014   stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
   4015   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4016   lfd(dst, MemOperand(sp));
   4017   addi(sp, sp, Operand(kDoubleSize));
   4018 }
   4019 #endif
   4020 
   4021 
   4022 void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
   4023                                      Register scratch) {
   4024 #if V8_TARGET_ARCH_PPC64
   4025   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   4026     mffprd(scratch, dst);
   4027     rldimi(scratch, src, 0, 32);
   4028     mtfprd(dst, scratch);
   4029     return;
   4030   }
   4031 #endif
   4032 
   4033   subi(sp, sp, Operand(kDoubleSize));
   4034   stfd(dst, MemOperand(sp));
   4035   stw(src, MemOperand(sp, Register::kMantissaOffset));
   4036   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4037   lfd(dst, MemOperand(sp));
   4038   addi(sp, sp, Operand(kDoubleSize));
   4039 }
   4040 
   4041 
   4042 void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
   4043                                       Register scratch) {
   4044 #if V8_TARGET_ARCH_PPC64
   4045   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   4046     mffprd(scratch, dst);
   4047     rldimi(scratch, src, 32, 0);
   4048     mtfprd(dst, scratch);
   4049     return;
   4050   }
   4051 #endif
   4052 
   4053   subi(sp, sp, Operand(kDoubleSize));
   4054   stfd(dst, MemOperand(sp));
   4055   stw(src, MemOperand(sp, Register::kExponentOffset));
   4056   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4057   lfd(dst, MemOperand(sp));
   4058   addi(sp, sp, Operand(kDoubleSize));
   4059 }
   4060 
   4061 
   4062 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
   4063 #if V8_TARGET_ARCH_PPC64
   4064   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   4065     mffprwz(dst, src);
   4066     return;
   4067   }
   4068 #endif
   4069 
   4070   subi(sp, sp, Operand(kDoubleSize));
   4071   stfd(src, MemOperand(sp));
   4072   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4073   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
   4074   addi(sp, sp, Operand(kDoubleSize));
   4075 }
   4076 
   4077 
   4078 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
   4079 #if V8_TARGET_ARCH_PPC64
   4080   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   4081     mffprd(dst, src);
   4082     srdi(dst, dst, Operand(32));
   4083     return;
   4084   }
   4085 #endif
   4086 
   4087   subi(sp, sp, Operand(kDoubleSize));
   4088   stfd(src, MemOperand(sp));
   4089   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4090   lwz(dst, MemOperand(sp, Register::kExponentOffset));
   4091   addi(sp, sp, Operand(kDoubleSize));
   4092 }
   4093 
   4094 
   4095 void MacroAssembler::MovDoubleToInt64(
   4096 #if !V8_TARGET_ARCH_PPC64
   4097     Register dst_hi,
   4098 #endif
   4099     Register dst, DoubleRegister src) {
   4100 #if V8_TARGET_ARCH_PPC64
   4101   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
   4102     mffprd(dst, src);
   4103     return;
   4104   }
   4105 #endif
   4106 
   4107   subi(sp, sp, Operand(kDoubleSize));
   4108   stfd(src, MemOperand(sp));
   4109   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4110 #if V8_TARGET_ARCH_PPC64
   4111   ld(dst, MemOperand(sp, 0));
   4112 #else
   4113   lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
   4114   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
   4115 #endif
   4116   addi(sp, sp, Operand(kDoubleSize));
   4117 }
   4118 
   4119 
   4120 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
   4121   subi(sp, sp, Operand(kFloatSize));
   4122   stw(src, MemOperand(sp, 0));
   4123   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4124   lfs(dst, MemOperand(sp, 0));
   4125   addi(sp, sp, Operand(kFloatSize));
   4126 }
   4127 
   4128 
   4129 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
   4130   subi(sp, sp, Operand(kFloatSize));
   4131   frsp(src, src);
   4132   stfs(src, MemOperand(sp, 0));
   4133   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   4134   lwz(dst, MemOperand(sp, 0));
   4135   addi(sp, sp, Operand(kFloatSize));
   4136 }
   4137 
   4138 
   4139 void MacroAssembler::Add(Register dst, Register src, intptr_t value,
   4140                          Register scratch) {
   4141   if (is_int16(value)) {
   4142     addi(dst, src, Operand(value));
   4143   } else {
   4144     mov(scratch, Operand(value));
   4145     add(dst, src, scratch);
   4146   }
   4147 }
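        // Note: Add above and the Cmpi/Cmpli/Cmpwi/Cmplwi helpers below pick
        // the single-instruction immediate form when the value fits the
        // instruction's 16-bit immediate field and otherwise materialize it in
        // the scratch register first.  For example, Add(dst, src, 0x7FFF,
        // scratch) emits one addi, while Add(dst, src, 0x12345, scratch)
        // emits mov followed by add.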
   4148 
   4149 
   4150 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
   4151                           CRegister cr) {
   4152   intptr_t value = src2.immediate();
   4153   if (is_int16(value)) {
   4154     cmpi(src1, src2, cr);
   4155   } else {
   4156     mov(scratch, src2);
   4157     cmp(src1, scratch, cr);
   4158   }
   4159 }
   4160 
   4161 
   4162 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
   4163                            CRegister cr) {
   4164   intptr_t value = src2.immediate();
   4165   if (is_uint16(value)) {
   4166     cmpli(src1, src2, cr);
   4167   } else {
   4168     mov(scratch, src2);
   4169     cmpl(src1, scratch, cr);
   4170   }
   4171 }
   4172 
   4173 
   4174 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
   4175                            CRegister cr) {
   4176   intptr_t value = src2.immediate();
   4177   if (is_int16(value)) {
   4178     cmpwi(src1, src2, cr);
   4179   } else {
   4180     mov(scratch, src2);
   4181     cmpw(src1, scratch, cr);
   4182   }
   4183 }
   4184 
   4185 
   4186 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
   4187                             Register scratch, CRegister cr) {
   4188   intptr_t value = src2.immediate();
   4189   if (is_uint16(value)) {
   4190     cmplwi(src1, src2, cr);
   4191   } else {
   4192     mov(scratch, src2);
   4193     cmplw(src1, scratch, cr);
   4194   }
   4195 }
   4196 
   4197 
   4198 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
   4199                          RCBit rc) {
   4200   if (rb.is_reg()) {
   4201     and_(ra, rs, rb.rm(), rc);
   4202   } else {
   4203     if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
   4204       andi(ra, rs, rb);
   4205     } else {
   4206       // mov handles the relocation.
   4207       DCHECK(!rs.is(r0));
   4208       mov(r0, rb);
   4209       and_(ra, rs, r0, rc);
   4210     }
   4211   }
   4212 }
   4213 
   4214 
   4215 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
   4216   if (rb.is_reg()) {
   4217     orx(ra, rs, rb.rm(), rc);
   4218   } else {
   4219     if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
   4220       ori(ra, rs, rb);
   4221     } else {
   4222       // mov handles the relocation.
   4223       DCHECK(!rs.is(r0));
   4224       mov(r0, rb);
   4225       orx(ra, rs, r0, rc);
   4226     }
   4227   }
   4228 }
   4229 
   4230 
   4231 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
   4232                          RCBit rc) {
   4233   if (rb.is_reg()) {
   4234     xor_(ra, rs, rb.rm(), rc);
   4235   } else {
   4236     if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
   4237       xori(ra, rs, rb);
   4238     } else {
   4239       // mov handles the relocation.
   4240       DCHECK(!rs.is(r0));
   4241       mov(r0, rb);
   4242       xor_(ra, rs, r0, rc);
   4243     }
   4244   }
   4245 }
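        // The immediate short cuts above follow the PPC encodings: andi. exists
        // only in its record form and always sets CR0, so And() can take the
        // d-form path only when SetRC is requested, while ori/xori never touch
        // CR0 and are only usable with LeaveRC.  Otherwise the constant is
        // materialized into r0 (hence the DCHECK that rs is not r0) and the
        // register form is used.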
   4246 
   4247 
   4248 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
   4249                                    CRegister cr) {
   4250 #if V8_TARGET_ARCH_PPC64
   4251   LoadSmiLiteral(scratch, smi);
   4252   cmp(src1, scratch, cr);
   4253 #else
   4254   Cmpi(src1, Operand(smi), scratch, cr);
   4255 #endif
   4256 }
   4257 
   4258 
   4259 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
   4260                                     CRegister cr) {
   4261 #if V8_TARGET_ARCH_PPC64
   4262   LoadSmiLiteral(scratch, smi);
   4263   cmpl(src1, scratch, cr);
   4264 #else
   4265   Cmpli(src1, Operand(smi), scratch, cr);
   4266 #endif
   4267 }
   4268 
   4269 
   4270 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
   4271                                    Register scratch) {
   4272 #if V8_TARGET_ARCH_PPC64
   4273   LoadSmiLiteral(scratch, smi);
   4274   add(dst, src, scratch);
   4275 #else
   4276   Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
   4277 #endif
   4278 }
   4279 
   4280 
   4281 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
   4282                                    Register scratch) {
   4283 #if V8_TARGET_ARCH_PPC64
   4284   LoadSmiLiteral(scratch, smi);
   4285   sub(dst, src, scratch);
   4286 #else
   4287   Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
   4288 #endif
   4289 }
   4290 
   4291 
   4292 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
   4293                                    Register scratch, RCBit rc) {
   4294 #if V8_TARGET_ARCH_PPC64
   4295   LoadSmiLiteral(scratch, smi);
   4296   and_(dst, src, scratch, rc);
   4297 #else
   4298   And(dst, src, Operand(smi), rc);
   4299 #endif
   4300 }
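        // On PPC64 V8 keeps the Smi payload in the upper 32 bits of the tagged
        // word, so a Smi literal essentially never fits a 16-bit immediate and
        // is loaded into `scratch` first.  On 32-bit targets the tagged value
        // is a small intptr that Cmpi/Add/And can often encode directly.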
   4301 
   4302 
   4303 // Load a "pointer" sized value from the memory location
   4304 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
   4305                            Register scratch) {
   4306   int offset = mem.offset();
   4307 
   4308   if (!is_int16(offset)) {
   4309     /* cannot use d-form */
   4310     DCHECK(!scratch.is(no_reg));
   4311     mov(scratch, Operand(offset));
   4312     LoadPX(dst, MemOperand(mem.ra(), scratch));
   4313   } else {
   4314 #if V8_TARGET_ARCH_PPC64
   4315     int misaligned = (offset & 3);
   4316     if (misaligned) {
   4317       // adjust base to conform to offset alignment requirements
   4318       // TODO: enhance to use scratch if dst is unsuitable
   4319       DCHECK(!dst.is(r0));
   4320       addi(dst, mem.ra(), Operand((offset & 3) - 4));
   4321       ld(dst, MemOperand(dst, (offset & ~3) + 4));
   4322     } else {
   4323       ld(dst, mem);
   4324     }
   4325 #else
   4326     lwz(dst, mem);
   4327 #endif
   4328   }
   4329 }
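        // ld is a DS-form instruction: its 16-bit displacement must be a
        // multiple of 4.  For a misaligned offset the base is biased so the
        // remaining displacement is aligned, e.g. for offset 6:
        //   addi dst, ra, -2        // (6 & 3) - 4
        //   ld   dst, 8(dst)        // (6 & ~3) + 4;  -2 + 8 == 6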
   4330 
   4331 void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
   4332                             Register scratch) {
   4333   int offset = mem.offset();
   4334 
   4335   if (!is_int16(offset)) {
   4336     /* cannot use d-form */
   4337     DCHECK(!scratch.is(no_reg));
   4338     mov(scratch, Operand(offset));
   4339     LoadPUX(dst, MemOperand(mem.ra(), scratch));
   4340   } else {
   4341 #if V8_TARGET_ARCH_PPC64
   4342     ldu(dst, mem);
   4343 #else
   4344     lwzu(dst, mem);
   4345 #endif
   4346   }
   4347 }
   4348 
   4349 // Store a "pointer" sized value to the memory location
   4350 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
   4351                             Register scratch) {
   4352   int offset = mem.offset();
   4353 
   4354   if (!is_int16(offset)) {
   4355     /* cannot use d-form */
   4356     DCHECK(!scratch.is(no_reg));
   4357     mov(scratch, Operand(offset));
   4358     StorePX(src, MemOperand(mem.ra(), scratch));
   4359   } else {
   4360 #if V8_TARGET_ARCH_PPC64
   4361     int misaligned = (offset & 3);
   4362     if (misaligned) {
   4363       // adjust base to conform to offset alignment requirements
   4364       // a suitable scratch is required here
   4365       DCHECK(!scratch.is(no_reg));
   4366       if (scratch.is(r0)) {
   4367         LoadIntLiteral(scratch, offset);
   4368         stdx(src, MemOperand(mem.ra(), scratch));
   4369       } else {
   4370         addi(scratch, mem.ra(), Operand((offset & 3) - 4));
   4371         std(src, MemOperand(scratch, (offset & ~3) + 4));
   4372       }
   4373     } else {
   4374       std(src, mem);
   4375     }
   4376 #else
   4377     stw(src, mem);
   4378 #endif
   4379   }
   4380 }
   4381 
   4382 void MacroAssembler::StorePU(Register src, const MemOperand& mem,
   4383                              Register scratch) {
   4384   int offset = mem.offset();
   4385 
   4386   if (!is_int16(offset)) {
   4387     /* cannot use d-form */
   4388     DCHECK(!scratch.is(no_reg));
   4389     mov(scratch, Operand(offset));
   4390     StorePUX(src, MemOperand(mem.ra(), scratch));
   4391   } else {
   4392 #if V8_TARGET_ARCH_PPC64
   4393     stdu(src, mem);
   4394 #else
   4395     stwu(src, mem);
   4396 #endif
   4397   }
   4398 }
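        // The *PU/*PUX helpers use the update forms (ldu/stdu, lwzu/stwu and
        // their indexed equivalents), which write the computed effective
        // address back into the base register as a side effect.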
   4399 
   4400 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
   4401                                    Register scratch) {
   4402   int offset = mem.offset();
   4403 
   4404   if (!is_int16(offset)) {
   4405     DCHECK(!scratch.is(no_reg));
   4406     mov(scratch, Operand(offset));
   4407     lwax(dst, MemOperand(mem.ra(), scratch));
   4408   } else {
   4409 #if V8_TARGET_ARCH_PPC64
   4410     int misaligned = (offset & 3);
   4411     if (misaligned) {
   4412       // adjust base to conform to offset alignment requirements
   4413       // TODO: enhance to use scratch if dst is unsuitable
   4414       DCHECK(!dst.is(r0));
   4415       addi(dst, mem.ra(), Operand((offset & 3) - 4));
   4416       lwa(dst, MemOperand(dst, (offset & ~3) + 4));
   4417     } else {
   4418       lwa(dst, mem);
   4419     }
   4420 #else
   4421     lwz(dst, mem);
   4422 #endif
   4423   }
   4424 }
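        // lwa/lwax are the "algebraic" loads: they sign-extend the 32-bit word
        // into the destination, whereas LoadWord below uses lwz/lwzx, which
        // zero-extend.  lwa is DS-form like ld, hence the same alignment
        // fix-up as in LoadP.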
   4425 
   4426 
   4427 // Variable length depending on whether offset fits into immediate field
   4428 // MemOperand currently only supports d-form
   4429 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
   4430                               Register scratch) {
   4431   Register base = mem.ra();
   4432   int offset = mem.offset();
   4433 
   4434   if (!is_int16(offset)) {
   4435     LoadIntLiteral(scratch, offset);
   4436     lwzx(dst, MemOperand(base, scratch));
   4437   } else {
   4438     lwz(dst, mem);
   4439   }
   4440 }
   4441 
   4442 
   4443 // Variable length depending on whether offset fits into immediate field
   4444 // MemOperand currently only supports d-form
   4445 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
   4446                                Register scratch) {
   4447   Register base = mem.ra();
   4448   int offset = mem.offset();
   4449 
   4450   if (!is_int16(offset)) {
   4451     LoadIntLiteral(scratch, offset);
   4452     stwx(src, MemOperand(base, scratch));
   4453   } else {
   4454     stw(src, mem);
   4455   }
   4456 }
   4457 
   4458 
   4459 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
   4460                                        Register scratch) {
   4461   int offset = mem.offset();
   4462 
   4463   if (!is_int16(offset)) {
   4464     DCHECK(!scratch.is(no_reg));
   4465     mov(scratch, Operand(offset));
   4466     lhax(dst, MemOperand(mem.ra(), scratch));
   4467   } else {
   4468     lha(dst, mem);
   4469   }
   4470 }
   4471 
   4472 
   4473 // Variable length depending on whether offset fits into immediate field
   4474 // MemOperand currently only supports d-form
   4475 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
   4476                                   Register scratch) {
   4477   Register base = mem.ra();
   4478   int offset = mem.offset();
   4479 
   4480   if (!is_int16(offset)) {
   4481     LoadIntLiteral(scratch, offset);
   4482     lhzx(dst, MemOperand(base, scratch));
   4483   } else {
   4484     lhz(dst, mem);
   4485   }
   4486 }
   4487 
   4488 
   4489 // Variable length depending on whether offset fits into immediate field
   4490 // MemOperand currently only supports d-form
   4491 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
   4492                                    Register scratch) {
   4493   Register base = mem.ra();
   4494   int offset = mem.offset();
   4495 
   4496   if (!is_int16(offset)) {
   4497     LoadIntLiteral(scratch, offset);
   4498     sthx(src, MemOperand(base, scratch));
   4499   } else {
   4500     sth(src, mem);
   4501   }
   4502 }
   4503 
   4504 
   4505 // Variable length depending on whether offset fits into immediate field
   4506 // MemOperand currently only supports d-form
   4507 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
   4508                               Register scratch) {
   4509   Register base = mem.ra();
   4510   int offset = mem.offset();
   4511 
   4512   if (!is_int16(offset)) {
   4513     LoadIntLiteral(scratch, offset);
   4514     lbzx(dst, MemOperand(base, scratch));
   4515   } else {
   4516     lbz(dst, mem);
   4517   }
   4518 }
   4519 
   4520 
   4521 // Variable length depending on whether offset fits into immediate field
   4522 // MemOperand currently only supports d-form
   4523 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
   4524                                Register scratch) {
   4525   Register base = mem.ra();
   4526   int offset = mem.offset();
   4527 
   4528   if (!is_int16(offset)) {
   4529     LoadIntLiteral(scratch, offset);
   4530     stbx(src, MemOperand(base, scratch));
   4531   } else {
   4532     stb(src, mem);
   4533   }
   4534 }
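        // All of the word/half-word/byte helpers above follow the same shape:
        // use the d-form instruction when the offset fits a signed 16-bit
        // displacement, otherwise materialize the offset into `scratch` and
        // use the indexed (x-form) instruction.  The *Arith variants
        // (lha/lhax, lwa/lwax) sign-extend; lhz, lbz and friends zero-extend.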
   4535 
   4536 
   4537 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
   4538                                         Representation r, Register scratch) {
   4539   DCHECK(!r.IsDouble());
   4540   if (r.IsInteger8()) {
   4541     LoadByte(dst, mem, scratch);
   4542     extsb(dst, dst);
   4543   } else if (r.IsUInteger8()) {
   4544     LoadByte(dst, mem, scratch);
   4545   } else if (r.IsInteger16()) {
   4546     LoadHalfWordArith(dst, mem, scratch);
   4547   } else if (r.IsUInteger16()) {
   4548     LoadHalfWord(dst, mem, scratch);
   4549 #if V8_TARGET_ARCH_PPC64
   4550   } else if (r.IsInteger32()) {
   4551     LoadWordArith(dst, mem, scratch);
   4552 #endif
   4553   } else {
   4554     LoadP(dst, mem, scratch);
   4555   }
   4556 }
   4557 
   4558 
   4559 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
   4560                                          Representation r, Register scratch) {
   4561   DCHECK(!r.IsDouble());
   4562   if (r.IsInteger8() || r.IsUInteger8()) {
   4563     StoreByte(src, mem, scratch);
   4564   } else if (r.IsInteger16() || r.IsUInteger16()) {
   4565     StoreHalfWord(src, mem, scratch);
   4566 #if V8_TARGET_ARCH_PPC64
   4567   } else if (r.IsInteger32()) {
   4568     StoreWord(src, mem, scratch);
   4569 #endif
   4570   } else {
   4571     if (r.IsHeapObject()) {
   4572       AssertNotSmi(src);
   4573     } else if (r.IsSmi()) {
   4574       AssertSmi(src);
   4575     }
   4576     StoreP(src, mem, scratch);
   4577   }
   4578 }
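        // Illustrative dispatch (hypothetical call sites):
        //   LoadRepresentation(r3, mem, Representation::Integer8(), r0)
        //     -> LoadByte + extsb (sign-extend the byte to full width)
        //   StoreRepresentation(r3, mem, Representation::Smi(), r0)
        //     -> AssertSmi(r3); StoreP(r3, mem, r0)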
   4579 
   4580 
   4581 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
   4582                                 Register scratch) {
   4583   Register base = mem.ra();
   4584   int offset = mem.offset();
   4585 
   4586   if (!is_int16(offset)) {
   4587     mov(scratch, Operand(offset));
   4588     lfdx(dst, MemOperand(base, scratch));
   4589   } else {
   4590     lfd(dst, mem);
   4591   }
   4592 }
   4593 
   4594 void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
   4595                                 Register scratch) {
   4596   Register base = mem.ra();
   4597   int offset = mem.offset();
   4598 
   4599   if (!is_int16(offset)) {
   4600     mov(scratch, Operand(offset));
   4601     lfdux(dst, MemOperand(base, scratch));
   4602   } else {
   4603     lfdu(dst, mem);
   4604   }
   4605 }
   4606 
   4607 void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
   4608                                 Register scratch) {
   4609   Register base = mem.ra();
   4610   int offset = mem.offset();
   4611 
   4612   if (!is_int16(offset)) {
   4613     mov(scratch, Operand(offset));
   4614     lfsx(dst, MemOperand(base, scratch));
   4615   } else {
   4616     lfs(dst, mem);
   4617   }
   4618 }
   4619 
   4620 void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
   4621                                 Register scratch) {
   4622   Register base = mem.ra();
   4623   int offset = mem.offset();
   4624 
   4625   if (!is_int16(offset)) {
   4626     mov(scratch, Operand(offset));
   4627     lfsux(dst, MemOperand(base, scratch));
   4628   } else {
   4629     lfsu(dst, mem);
   4630   }
   4631 }
   4632 
   4633 void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
   4634                                  Register scratch) {
   4635   Register base = mem.ra();
   4636   int offset = mem.offset();
   4637 
   4638   if (!is_int16(offset)) {
   4639     mov(scratch, Operand(offset));
   4640     stfdx(src, MemOperand(base, scratch));
   4641   } else {
   4642     stfd(src, mem);
   4643   }
   4644 }
   4645 
   4646 void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
   4647                                  Register scratch) {
   4648   Register base = mem.ra();
   4649   int offset = mem.offset();
   4650 
   4651   if (!is_int16(offset)) {
   4652     mov(scratch, Operand(offset));
   4653     stfdux(src, MemOperand(base, scratch));
   4654   } else {
   4655     stfdu(src, mem);
   4656   }
   4657 }
   4658 
   4659 void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
   4660                                  Register scratch) {
   4661   Register base = mem.ra();
   4662   int offset = mem.offset();
   4663 
   4664   if (!is_int16(offset)) {
   4665     mov(scratch, Operand(offset));
   4666     stfsx(src, MemOperand(base, scratch));
   4667   } else {
   4668     stfs(src, mem);
   4669   }
   4670 }
   4671 
   4672 void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
   4673                                  Register scratch) {
   4674   Register base = mem.ra();
   4675   int offset = mem.offset();
   4676 
   4677   if (!is_int16(offset)) {
   4678     mov(scratch, Operand(offset));
   4679     stfsux(src, MemOperand(base, scratch));
   4680   } else {
   4681     stfsu(src, mem);
   4682   }
   4683 }
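        // The floating-point load/store helpers mirror the integer ones:
        // d-form lfd/stfd (double) or lfs/stfs (single) when the offset fits
        // int16, indexed forms otherwise, and the *U variants use the update
        // forms that write the effective address back into the base register.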
   4684 
   4685 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
   4686                                                      Register scratch_reg,
   4687                                                      Register scratch2_reg,
   4688                                                      Label* no_memento_found) {
   4689   Label map_check;
   4690   Label top_check;
   4691   ExternalReference new_space_allocation_top_adr =
   4692       ExternalReference::new_space_allocation_top_address(isolate());
   4693   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   4694   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
   4695   Register mask = scratch2_reg;
   4696 
   4697   DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
   4698 
   4699   // Bail out if the object is not in new space.
   4700   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   4701 
   4702   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
   4703   lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
   4704   addi(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
   4705 
   4706   // If the object is in new space, we need to check whether it is on the same
   4707   // page as the current top.
   4708   mov(ip, Operand(new_space_allocation_top_adr));
   4709   LoadP(ip, MemOperand(ip));
   4710   Xor(r0, scratch_reg, Operand(ip));
   4711   and_(r0, r0, mask, SetRC);
   4712   beq(&top_check, cr0);
   4713   // The object is on a different page than allocation top. Bail out if the
   4714   // object sits on the page boundary as no memento can follow and we cannot
   4715   // touch the memory following it.
   4716   xor_(r0, scratch_reg, receiver_reg);
   4717   and_(r0, r0, mask, SetRC);
   4718   bne(no_memento_found, cr0);
   4719   // Continue with the actual map check.
   4720   b(&map_check);
   4721   // If top is on the same page as the current object, we need to check whether
   4722   // we are below top.
   4723   bind(&top_check);
   4724   cmp(scratch_reg, ip);
   4725   bgt(no_memento_found);
   4726   // Memento map check.
   4727   bind(&map_check);
   4728   LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
   4729   Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
   4730        r0);
   4731 }
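        // The page check above relies on (a ^ b) & ~kPageAlignmentMask being
        // zero exactly when a and b lie on the same page: the memento end is
        // first compared against the page of the allocation top and, if they
        // differ, against the receiver's page to rule out objects that end on
        // a page boundary.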
   4732 
   4733 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
   4734                                    Register reg4, Register reg5,
   4735                                    Register reg6) {
   4736   RegList regs = 0;
   4737   if (reg1.is_valid()) regs |= reg1.bit();
   4738   if (reg2.is_valid()) regs |= reg2.bit();
   4739   if (reg3.is_valid()) regs |= reg3.bit();
   4740   if (reg4.is_valid()) regs |= reg4.bit();
   4741   if (reg5.is_valid()) regs |= reg5.bit();
   4742   if (reg6.is_valid()) regs |= reg6.bit();
   4743 
   4744   const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   4745   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
   4746     int code = config->GetAllocatableGeneralCode(i);
   4747     Register candidate = Register::from_code(code);
   4748     if (regs & candidate.bit()) continue;
   4749     return candidate;
   4750   }
   4751   UNREACHABLE();
   4752   return no_reg;
   4753 }
   4754 
   4755 
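        // Walks the prototype chain map-by-map, jumping to `found` as soon as
        // an instance type below JS_OBJECT_TYPE or a map with
        // DICTIONARY_ELEMENTS is seen, and falling through once the null
        // prototype is reached.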
   4756 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
   4757                                                       Register scratch0,
   4758                                                       Register scratch1,
   4759                                                       Label* found) {
   4760   DCHECK(!scratch1.is(scratch0));
   4761   Register current = scratch0;
   4762   Label loop_again, end;
   4763 
   4764   // current (scratch0) walks the prototype chain, starting from the object.
   4765   mr(current, object);
   4766   LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
   4767   LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
   4768   CompareRoot(current, Heap::kNullValueRootIndex);
   4769   beq(&end);
   4770 
   4771   // Loop based on the map going up the prototype chain.
   4772   bind(&loop_again);
   4773   LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
   4774 
   4775   STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
   4776   STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
   4777   lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
   4778   cmpi(scratch1, Operand(JS_OBJECT_TYPE));
   4779   blt(found);
   4780 
   4781   lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
   4782   DecodeField<Map::ElementsKindBits>(scratch1);
   4783   cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
   4784   beq(found);
   4785   LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
   4786   CompareRoot(current, Heap::kNullValueRootIndex);
   4787   bne(&loop_again);
   4788 
   4789   bind(&end);
   4790 }
   4791 
   4792 
   4793 #ifdef DEBUG
   4794 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
   4795                 Register reg5, Register reg6, Register reg7, Register reg8,
   4796                 Register reg9, Register reg10) {
   4797   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
   4798                         reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
   4799                         reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
   4800                         reg10.is_valid();
   4801 
   4802   RegList regs = 0;
   4803   if (reg1.is_valid()) regs |= reg1.bit();
   4804   if (reg2.is_valid()) regs |= reg2.bit();
   4805   if (reg3.is_valid()) regs |= reg3.bit();
   4806   if (reg4.is_valid()) regs |= reg4.bit();
   4807   if (reg5.is_valid()) regs |= reg5.bit();
   4808   if (reg6.is_valid()) regs |= reg6.bit();
   4809   if (reg7.is_valid()) regs |= reg7.bit();
   4810   if (reg8.is_valid()) regs |= reg8.bit();
   4811   if (reg9.is_valid()) regs |= reg9.bit();
   4812   if (reg10.is_valid()) regs |= reg10.bit();
   4813   int n_of_non_aliasing_regs = NumRegs(regs);
   4814 
   4815   return n_of_valid_regs != n_of_non_aliasing_regs;
   4816 }
   4817 #endif
   4818 
   4819 
   4820 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
   4821                          FlushICache flush_cache)
   4822     : address_(address),
   4823       size_(instructions * Assembler::kInstrSize),
   4824       masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
   4825       flush_cache_(flush_cache) {
   4826   // Create a new macro assembler pointing to the address of the code to patch.
   4827   // The size is adjusted with kGap in order for the assembler to generate size
   4828   // bytes of instructions without failing due to buffer size constraints.
   4829   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   4830 }
   4831 
   4832 
   4833 CodePatcher::~CodePatcher() {
   4834   // Indicate that code has changed.
   4835   if (flush_cache_ == FLUSH) {
   4836     Assembler::FlushICache(masm_.isolate(), address_, size_);
   4837   }
   4838 
   4839   // Check that the code was patched as expected.
   4840   DCHECK(masm_.pc_ == address_ + size_);
   4841   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   4842 }
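        // Sketch of a typical use (hypothetical values):
        //   CodePatcher patcher(isolate, pc, 2, CodePatcher::FLUSH);
        //   patcher.masm()->...;  // emit exactly 2 replacement instructions
        // On destruction the patcher flushes the instruction cache (when FLUSH
        // was requested) and DCHECKs that exactly size_ bytes were emitted.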
   4843 
   4844 
   4845 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
   4846 
   4847 
   4848 void CodePatcher::EmitCondition(Condition cond) {
   4849   Instr instr = Assembler::instr_at(masm_.pc_);
   4850   switch (cond) {
   4851     case eq:
   4852       instr = (instr & ~kCondMask) | BT;
   4853       break;
   4854     case ne:
   4855       instr = (instr & ~kCondMask) | BF;
   4856       break;
   4857     default:
   4858       UNIMPLEMENTED();
   4859   }
   4860   masm_.emit(instr);
   4861 }
   4862 
   4863 
   4864 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
   4865                                    int32_t divisor) {
   4866   DCHECK(!dividend.is(result));
   4867   DCHECK(!dividend.is(r0));
   4868   DCHECK(!result.is(r0));
   4869   base::MagicNumbersForDivision<uint32_t> mag =
   4870       base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
   4871   mov(r0, Operand(mag.multiplier));
   4872   mulhw(result, dividend, r0);
   4873   bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
   4874   if (divisor > 0 && neg) {
   4875     add(result, result, dividend);
   4876   }
   4877   if (divisor < 0 && !neg && mag.multiplier > 0) {
   4878     sub(result, result, dividend);
   4879   }
   4880   if (mag.shift > 0) srawi(result, result, mag.shift);
   4881   ExtractBit(r0, dividend, 31);
   4882   add(result, result, r0);
   4883 }
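        // TruncatingDiv implements signed division by a constant via a
        // "magic number" multiply (Hacker's Delight style).  For example, for
        // divisor 7 the computed multiplier is 0x92492493 with shift 2: the
        // multiplier's sign bit is set, so the dividend is added back after
        // mulhw, the result is shifted right arithmetically by 2, and finally
        // the dividend's sign bit is added to round the quotient toward zero.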
   4884 
   4885 }  // namespace internal
   4886 }  // namespace v8
   4887 
   4888 #endif  // V8_TARGET_ARCH_PPC
   4889