/*
 * Copyright (C) 2008 Apple Inc.
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM_h
#define MacroAssemblerARM_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
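    // DoubleConditionBitSpecial is an extra flag carried in the low bits of a
    // DoubleCondition, outside the ARM condition code itself. branchDouble() tests
    // this flag to decide whether an additional fix-up compare is emitted so that
    // unordered (NaN) comparisons give the documented answer; see branchDouble().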
public:
    enum Condition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE,
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    static const Scale ScalePtr = TimesFour;

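    // Note: ARMRegisters::S0 and ARMRegisters::S1 are used throughout this class as
    // assembler scratch registers for materializing immediates, addresses and other
    // temporary values.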
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    void add32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }

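    // getImm() with the invert flag set may return the bitwise inverse of the
    // requested immediate tagged with OP2_INV_IMM when the inverted value fits an
    // ARM operand-2 encoding; in that case BIC (bit clear) with the inverted operand
    // computes the same result as AND with the original immediate.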
    void and32(Imm32 imm, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }

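    // Register shift amounts are masked to the low five bits (0..31) before use, so
    // shift counts are taken modulo 32, mirroring what the immediate forms below do
    // with "imm.m_value & 0x1f".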
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

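    // ARM multiply instructions (before ARMv6) require the destination register to
    // differ from the first source operand, so when src == dest the operand is first
    // copied into the scratch register S0.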
    void mul32(RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void sub32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

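    // On ARMv5 and earlier, a word load from a non-word-aligned address does not act
    // as a plain unaligned load, so a dedicated implementation is provided out of line
    // for those cores (see MacroAssemblerARM.cpp); newer cores simply reuse load32().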
#if CPU(ARMV5_OR_LOWER)
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

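    // The *WithAddressOffsetPatch variants emit a load of a placeholder offset (kept
    // in the constant pool) into S0, followed by a register-offset transfer, so the
    // 32-bit offset can be rewritten after code generation; the returned DataLabel32
    // records the patch location.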
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        load32(address, dest);
        return label;
    }

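    // Halfword loads have a smaller immediate offset range than word loads, so the
    // effective address base + (index << scale) is formed in S0 first and the offset
    // is then applied as an up (ldrh_u) or down (ldrh_d) byte offset.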
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale));
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
        else
            m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }

    void store32(Imm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(Imm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

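    // Immediates flagged as pointers (imm.m_isPointer) are always materialized with a
    // full-width constant-pool load (ldr_un_imm) rather than a mov/mvn sequence; this
    // keeps the whole 32-bit value in one load that can be repatched later if needed.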
    void move(Imm32 imm, RegisterID dest)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0)
    {
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else
            m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        move(target, ARMRegisters::pc);
    }

    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

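    // Example (illustrative sketch only, not part of this interface): JIT code built
    // on top of this class typically pairs the branch*() family with the Jump
    // linking helpers inherited from AbstractMacroAssembler, along the lines of
    //
    //     MacroAssemblerARM masm;
    //     MacroAssemblerARM::Jump overflow =
    //         masm.branchAdd32(MacroAssemblerARM::Overflow, src, dest);
    //     // ... fast path ...
    //     overflow.link(&masm); // bind the overflow branch to the current location
    //
    // Register names such as 'src' and 'dest' above are placeholders.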
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

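    // mull32() performs a signed long multiply with the high word of the product in
    // S1, then compares S1 against the low word arithmetically shifted right by 31.
    // The two only match when the 64-bit product fits in a signed 32-bit value, so a
    // following NonZero branch (taken on "not equal" flags) detects overflow; this is
    // how branchMul32(Overflow, ...) below is implemented.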
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    Call nearCall()
    {
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        prepareCall();
        move(target, ARMRegisters::pc);
        JmpSrc jmpSrc;
        return Call(jmpSrc, Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.mov_r(ARMRegisters::pc, linkRegister);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        load32(left, ARMRegisters::S1);
        set32(cond, ARMRegisters::S1, right, dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        if (mask.m_value == -1)
            m_assembler.cmp_r(0, ARMRegisters::S1);
        else
            m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        // ARM doesn't have byte registers
        setTest32(cond, address, mask, dest);
    }

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void load32(void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Call call()
    {
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
    }

    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

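    // moveWithPatch() emits a constant-pool load of the initial value and returns a
    // DataLabelPtr marking it, so the pointer can be replaced after linking. The
    // branchPtrWithPatch() helpers below build on it to create compare-and-branch
    // sequences whose right-hand pointer operand is patchable.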
    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(ImmPtr(0), address);
    }

    // Floating point operators
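    // s_isVFPPresent (declared at the end of this class and defined out of line) is
    // expected to hold the result of a runtime VFP hardware check, so callers consult
    // supportsFloatingPoint() before emitting any of the double-precision operations
    // below.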
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointTruncate() const
    {
        return false;
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.faddd_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fdivd_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsubd_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmuld_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmsr_r(dest, src);
        m_assembler.fsitod_r(dest, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // Going through flds is not worth the effort here.
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // Going through flds is not worth the effort here.
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

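    // For conditions carrying DoubleConditionBitSpecial, an extra CMP of S0 against
    // itself is emitted under the VS condition. After fmstat, VS means the floating
    // point comparison was unordered, and the self-compare then forces an "equal"
    // flags state, which makes the EQ/NE-based conditions give the documented answer
    // for NaN operands.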
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmpd_r(left, right);
        m_assembler.fmstat();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }

    // Truncates 'src' to an integer, and places the resulting value in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Converts 'src' to an integer, and places the resulting value in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.ftosid_r(ARMRegisters::SD0, src);
        m_assembler.fmrs_r(dest, ARMRegisters::SD0);

        // Convert the integer result back to a double and compare it with the original value - if they are not equal or are unordered (NaN), jump.
        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 compares equal to -0.0.
        failureCases.append(branchTest32(Zero, dest));
    }

    void zeroDouble(FPRegisterID srcDest)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, srcDest);
    }

protected:
    ARMAssembler::Condition ARMCondition(Condition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

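    // prepareCall() loads the link register with the current pc. In ARM state,
    // reading pc yields the address of the current instruction plus 8, i.e. the
    // instruction after the one that follows the mov, so the control-transfer
    // instruction must be emitted immediately after prepareCall() for lr to hold the
    // correct return address. The ensureSpace() call keeps the pair from being split
    // by a constant pool flush.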
    void prepareCall()
    {
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
    }

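    // call32() branches by loading pc from [base + offset], choosing among three
    // forms depending on how large the (positive or negative) offset is: a direct
    // 12-bit immediate offset, an add/sub of the upper bits into S0 followed by a
    // 12-bit offset load, or a full register offset materialized with getImm().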
    void call32(RegisterID base, int32_t offset)
    {
        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_u(true, ARMRegisters::pc, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
            } else {
                ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
                prepareCall();
                m_assembler.dtr_ur(true, ARMRegisters::pc, base, reg);
            }
        } else {
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, ARMRegisters::pc, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
            } else {
                ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
                prepareCall();
                m_assembler.dtr_dr(true, ARMRegisters::pc, base, reg);
            }
        }
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static const bool s_isVFPPresent;
};

}

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#endif // MacroAssemblerARM_h