// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

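// The "__" macro is V8's conventional shorthand for emitting code through the
// macro assembler, e.g. "__ mov(r0, r1)" expands to "masm()->mov(r0, r1)".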
#define __ masm()->


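// r9 serves as this code generator's scratch register; the uses below assume
// the register allocator never hands it out for values.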
#define kScratchReg r9


// Adds ARM-specific methods to convert InstructionOperands.
class ArmOperandConverter : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

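  // Maps the instruction's flags mode to the ARM S bit: instructions whose
  // flags are consumed by a branch or a boolean materialization must update
  // the condition codes (SetCC).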
  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

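  // Converts a constant input to an assembler Operand. Only 32-bit integers
  // and doubles (materialized as tenured heap numbers) can be encoded here;
  // the remaining constant kinds never appear as immediates on ARM.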
  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

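  // Decodes the flexible second operand (Operand2) from the instruction's
  // addressing mode: an immediate, a plain register, or a register shifted
  // by an immediate or by another register.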
  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }

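  // Decodes a memory operand of the form [base, #offset] or [base, index]
  // and advances *first_index past the inputs it consumed.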
  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset() {
    int index = 0;
    return InputOffset(&index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
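        // The callee is a tagged Code object; compute the untagged address
        // of its first instruction before calling through ip.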
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchJmp:
      __ b(code_->GetLabel(i.InputBlock(0)));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // Don't emit code for nops.
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
                               i.InputDoubleRegister(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVnegF64:
      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVsqrtF64:
      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStrb: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
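    // Single-precision values appear to be kept as doubles in registers:
    // they are widened to float64 on load and narrowed again on store.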
    case kArmVldr32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vldr(scratch, i.InputOffset());
      __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVstr32: {
      int index = 0;
      SwVfpRegister scratch = kScratchDoubleReg.low();
      MemOperand operand = i.InputOffset(&index);
      __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
      __ vstr(scratch, operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldr64:
      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVstr64: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputDoubleRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
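    // Store the value into the object at the given index, then invoke the
    // write barrier so the garbage collector sees the updated reference.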
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
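  // "Unordered" conditions arise from floating-point comparisons involving
  // NaN; VFP comparisons report them through the V flag, so the vs branches
  // below dispatch the NaN case before testing the ordered condition.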
  switch (condition) {
    case kUnorderedEqual:
      __ b(vs, flabel);
    // Fall through.
    case kEqual:
      __ b(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ b(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ b(ne, tlabel);
      break;
    case kSignedLessThan:
      __ b(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ b(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ b(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ b(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ b(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ b(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ b(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ b(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ b(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ b(hi, tlabel);
      break;
    case kOverflow:
      __ b(vs, tlabel);
      break;
    case kNotOverflow:
      __ b(vc, tlabel);
      break;
  }
  if (!fallthru) __ b(flabel);  // No fallthrough to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  switch (condition) {
    case kUnorderedEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
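  // Materialize the boolean: clear the register, then conditionally overwrite
  // it with 1. The unordered cases above either jumped here ("check") or
  // wrote the NaN result directly and jumped to "done".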
  __ bind(&check);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
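    // Frame for a call from C: push lr and fp (plus pp when the out-of-line
    // constant pool is enabled), then spill any callee-saved registers the
    // descriptor requires.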
    bool saved_pp;
    if (FLAG_enable_ool_constant_pool) {
      __ Push(lr, fp, pp);
      // Adjust FP to point to saved FP.
      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      saved_pp = true;
    } else {
      __ Push(lr, fp);
      __ mov(fp, sp);
      saved_pp = false;
    }
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0 || saved_pp) {
      // Save callee-saved registers.
      int register_save_area_size = saved_pp ? kPointerSize : 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);
      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ bind(&ok);
    }

  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else {
    __ LeaveFrame(StackFrame::MANUAL);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      Constant src = g.ToConstant(source);
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
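          // 64-bit integer constants cannot be materialized on 32-bit ARM.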
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = g.ToDoubleRegister(destination);
      __ vmov(result, g.ToDouble(source));
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      DwVfpRegister temp = kScratchDoubleReg;
      __ vmov(temp, g.ToDouble(source));
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
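    // Swapping two stack slots needs two temporaries; only one core scratch
    // register is reserved, so borrow a single-precision VFP register for
    // the second value.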
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8