Home | History | Annotate | Download | only in assembler
      1 /*
      2  * Copyright (C) 2008 Apple Inc.
      3  * Copyright (C) 2009, 2010 University of Szeged
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
     16  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     18  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
     19  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     20  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     21  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
     23  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     25  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26  */
     27 
     28 #ifndef MacroAssemblerARM_h
     29 #define MacroAssemblerARM_h
     30 
     31 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
     32 
     33 #include "ARMAssembler.h"
     34 #include "AbstractMacroAssembler.h"
     35 
     36 namespace JSC {
     37 
     38 class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
     39     static const int DoubleConditionMask = 0x0f;
     40     static const int DoubleConditionBitSpecial = 0x10;
     41     COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
     42 public:
     43     typedef ARMRegisters::FPRegisterID FPRegisterID;
     44 
    // Integer condition codes, mapped directly onto the ARM condition field.
    // Above/Below are the unsigned comparisons; GreaterThan/LessThan signed.
    // Note Equal/Zero and NotEqual/NonZero intentionally share encodings.
    enum Condition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE,
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };
     61 
    // Double-precision condition codes. Entries tagged with
    // DoubleConditionBitSpecial sit outside the plain ARM condition range
    // (the COMPILE_ASSERT above guarantees the bit does not clash), so the
    // branch emitter can presumably detect them and add extra handling for
    // the unordered (NaN) case — confirm against the double-branch code.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };
     78 
     79     static const RegisterID stackPointerRegister = ARMRegisters::sp;
     80     static const RegisterID linkRegister = ARMRegisters::lr;
     81 
     82     static const Scale ScalePtr = TimesFour;
     83 
    // dest += src, setting the condition flags (ADDS).
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }
     88 
    // [address] += imm, via a load/modify/store through scratch S1.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }
     95 
    // dest += imm, setting flags; getImm may materialize imm in scratch S0.
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    100 
    // dest += [src]; uses scratch S1 for the loaded value.
    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }
    106 
    // dest &= src, setting flags (ANDS).
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }
    111 
    112     void and32(TrustedImm32 imm, RegisterID dest)
    113     {
    114         ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
    115         if (w & ARMAssembler::OP2_INV_IMM)
    116             m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
    117         else
    118             m_assembler.ands_r(dest, dest, w);
    119     }
    120 
    121     void lshift32(RegisterID shift_amount, RegisterID dest)
    122     {
    123         ARMWord w = ARMAssembler::getOp2(0x1f);
    124         ASSERT(w != ARMAssembler::INVALID_IMM);
    125         m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
    126 
    127         m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    128     }
    129 
    // dest <<= (imm & 0x1f), setting flags.
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }
    134 
    // dest *= src, setting flags (MULS). If src == dest the multiplicand is
    // first copied to scratch S0 — presumably to satisfy an operand
    // restriction of the MUL encoding; confirm against ARMAssembler::muls_r.
    void mul32(RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }
    143 
    // dest = src * imm, setting flags; imm is materialized in scratch S0.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }
    149 
    // srcDest = -srcDest, setting flags (RSBS srcDest, srcDest, #0).
    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }
    154 
    // dest = ~dest, setting flags (MVNS).
    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }
    159 
    // dest |= src, setting flags (ORRS).
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }
    164 
    // dest |= imm, setting flags; getImm may materialize imm in scratch S0.
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    169 
    170     void rshift32(RegisterID shift_amount, RegisterID dest)
    171     {
    172         ARMWord w = ARMAssembler::getOp2(0x1f);
    173         ASSERT(w != ARMAssembler::INVALID_IMM);
    174         m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
    175 
    176         m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    177     }
    178 
    // Arithmetic shift: dest >>= (imm & 0x1f), setting flags.
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }
    183 
    184     void urshift32(RegisterID shift_amount, RegisterID dest)
    185     {
    186         ARMWord w = ARMAssembler::getOp2(0x1f);
    187         ASSERT(w != ARMAssembler::INVALID_IMM);
    188         m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
    189 
    190         m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    191     }
    192 
    // Logical (unsigned) shift: dest >>>= (imm & 0x1f), setting flags.
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }
    197 
    // dest -= src, setting flags (SUBS).
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }
    202 
    // dest -= imm, setting flags; getImm may materialize imm in scratch S0.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    207 
    // [address] -= imm, via a load/modify/store through scratch S1.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }
    214 
    // dest -= [src]; uses scratch S1 for the loaded value.
    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }
    220 
    // dest ^= src, setting flags (EORS).
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }
    225 
    // dest ^= imm, setting flags; getImm may materialize imm in scratch S0.
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    230 
    // dest = number of leading zero bits in src. CLZ only exists on ARMv5
    // and later; on older architectures this operation is unsupported.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz_r(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
#endif
    }
    241 
    // Load a byte from [base + offset] into dest. The trailing 'true'
    // presumably selects a byte-sized transfer in dataTransfer32 — confirm
    // against ARMAssembler::dataTransfer32.
    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }
    246 
    // Load a 32-bit word from [base + offset] into dest.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }
    251 
    // Load a 32-bit word from [base + (index << scale) + offset] into dest.
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
    256 
#if CPU(ARMV5_OR_LOWER)
    // ARMv5 and older cannot rely on unaligned word loads; the out-of-line
    // definition (elsewhere) assembles the word from separate accesses.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    // ARMv6+: a plain word load handles the unaligned case.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif
    265 
    // Load [base + offset] into dest where the 32-bit offset is loaded into
    // scratch S0 from a patchable constant (initially 0). The returned
    // DataLabel32 marks the constant so it can be rewritten later.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }
    273 
    // Load a halfword from [base + (index << scale) + offset] into dest.
    // The scaled base is computed into scratch S1 first because the
    // halfword form below only takes a base + immediate offset.
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
        load16(Address(ARMRegisters::S1, address.offset), dest);
    }
    279 
    // Load a halfword from [base + offset] into dest, selecting the
    // add-offset (ldrh_u) or subtract-offset (ldrh_d) form by the sign.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
        else
            m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
    }
    287 
    // Store src to [base + offset] where the 32-bit offset is loaded into
    // scratch S0 from a patchable constant (initially 0); the returned
    // DataLabel32 marks the constant for later patching.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }
    295 
    // Store the 32-bit word in src to [base + offset].
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }
    300 
    // Store the 32-bit word in src to [base + (index << scale) + offset].
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
    305 
    // Store a 32-bit immediate to [base + offset] via scratch S1. Pointer
    // immediates are loaded with ldr_un_imm — presumably to keep the value
    // in a patchable constant-pool slot rather than split across MOV/ORR.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }
    314 
    // Store src to an absolute address; the address is materialized in
    // scratch S0 and used as the base of a zero-offset store.
    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }
    320 
    // Store a 32-bit immediate to an absolute address. S0 holds the target
    // address, S1 the value (constant-pool load for pointer immediates).
    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
    330 
    // Pop the top of the machine stack into dest.
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }
    335 
    // Push src onto the machine stack.
    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }
    340 
    // Push the word at address onto the stack, via scratch S1.
    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }
    346 
    // Push a 32-bit immediate onto the stack, via scratch S0.
    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }
    352 
    // dest = imm. Pointer immediates go through ldr_un_imm — presumably a
    // patchable constant-pool load — while plain integers use moveImm.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }
    360 
    // dest = src (plain MOV; flags unchanged).
    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }
    365 
    // dest = pointer immediate (delegates to the TrustedImm32 overload).
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }
    370 
    // Exchange reg1 and reg2 using scratch S0 as the temporary.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }
    377 
    // On 32-bit ARM a pointer is already 32 bits, so this is just a move
    // (elided when src == dest).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }
    383 
    // On 32-bit ARM a pointer is already 32 bits, so this is just a move
    // (elided when src == dest).
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }
    389 
    // Compare the byte at 'left' (loaded into scratch S1) against 'right'
    // and branch on cond.
    Jump branch8(Condition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    395 
    // Compare two registers and emit a conditional branch; useConstantPool
    // is forwarded to the jump emitter.
    Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }
    401 
    402     Jump branch32(Condition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    403     {
    404         if (right.m_isPointer) {
    405             m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
    406             m_assembler.cmp_r(left, ARMRegisters::S0);
    407         } else {
    408             ARMWord tmp = m_assembler.getOp2(-right.m_value);
    409             if (tmp != ARMAssembler::INVALID_IMM)
    410                 m_assembler.cmn_r(left, tmp);
    411             else
    412                 m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
    413         }
    414         return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    415     }
    416 
    // Compare left against the word at 'right' (loaded into scratch S1).
    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }
    422 
    // Compare the word at 'left' (loaded into scratch S1) against right.
    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    428 
    // Compare the word at 'left' (loaded into scratch S1) against an
    // immediate.
    Jump branch32(Condition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    434 
    // Compare the word at the base-index address (loaded into scratch S1)
    // against an immediate.
    Jump branch32(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    440 
    // As branch32(BaseIndex, imm) but tolerating a halfword-aligned address
    // (see load32WithUnalignedHalfWords).
    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    446 
    // Unimplemented stub: halfword compare against a register is not
    // supported on this port. Asserts in debug; the returned jump is only
    // there to satisfy the signature.
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }
    455 
    // Compare the halfword at the base-index address against an immediate
    // and branch on cond. Uses S0 for the loaded halfword and S1 for the
    // materialized immediate.
    Jump branch16(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }
    463 
    // Branch on (byte at address) & mask, via scratch S1.
    Jump branchTest8(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }
    469 
    // Branch on (reg & mask); only the Zero/NonZero conditions are valid.
    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    476 
    477     Jump branchTest32(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    478     {
    479         ASSERT((cond == Zero) || (cond == NonZero));
    480         ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
    481         if (w & ARMAssembler::OP2_INV_IMM)
    482             m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
    483         else
    484             m_assembler.tst_r(reg, w);
    485         return Jump(m_assembler.jmp(ARMCondition(cond)));
    486     }
    487 
    // Branch on (word at address) & mask, via scratch S1.
    Jump branchTest32(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }
    493 
    // Branch on (word at base-index address) & mask, via scratch S1.
    Jump branchTest32(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }
    499 
    // Emit an unconditional, linkable jump.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }
    504 
    // Indirect jump to the address in target (BX).
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }
    509 
    // Indirect jump through memory: load the target word straight into pc.
    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }
    514 
    // dest += src, then branch on the resulting flags (ADDS sets them).
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    521 
    // dest += imm, then branch on the resulting flags.
    Jump branchAdd32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    528 
    // dest = src1 * src2 via SMULL (high half into scratch S1), then compare
    // S1 with dest >> 31: the two are equal exactly when the full 64-bit
    // product fits in 32 bits, so Z clear afterwards signals overflow.
    // src1 is copied to S0 when it aliases dest.
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }
    538 
    // dest *= src, branching on cond. Overflow is detected via mull32's
    // high-half comparison, so the branch condition is rewritten to NonZero.
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    550 
    // dest = src * imm, branching on cond. As above, Overflow goes through
    // mull32 (imm materialized in S0) and becomes a NonZero test.
    Jump branchMul32(Condition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    563 
    // dest -= src, then branch on the resulting flags (SUBS sets them).
    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    570 
    // dest -= imm, then branch on the resulting flags.
    Jump branchSub32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    577 
    // srcDest = -srcDest, then branch on the resulting flags (RSBS).
    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    584 
    // dest |= src, then branch on the resulting flags (ORRS; OR cannot
    // overflow, hence no Overflow in the assert).
    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
    591 
    // Emit a software breakpoint (BKPT #0).
    void breakpoint()
    {
        m_assembler.bkpt(0);
    }
    596 
    // Emit a linkable near call. On ARMv5+ the target is loaded into
    // scratch S1 (ensureSpace keeps the two-word sequence contiguous) and
    // called with BLX; older architectures fall back to a prepared jump.
    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }
    608 
    // Indirect call to the address in target (BLX).
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }
    613 
    // Call through a function pointer stored at [base + offset].
    void call(Address address)
    {
        call32(address.base, address.offset);
    }
    618 
    // Return to the caller (BX lr).
    void ret()
    {
        m_assembler.bx(linkRegister);
    }
    623 
    // dest = (left cond right) ? 1 : 0. The unconditional MOV of 0 does not
    // alter flags, so the conditional MOV of 1 still sees the CMP result.
    void set32Compare32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }
    630 
    // dest = (left cond imm) ? 1 : 0; imm may be materialized in scratch S0.
    void set32Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }
    637 
    void set8Compare32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32Compare32(cond, left, right, dest);
    }
    643 
    void set8Compare32(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers; load the word into S1 and do a
        // full 32-bit compare.
        load32(left, ARMRegisters::S1);
        set32Compare32(cond, ARMRegisters::S1, right, dest);
    }
    650 
    void set8Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32Compare32(cond, left, right, dest);
    }
    656 
    657     void set32Test32(Condition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    658     {
    659         if (mask.m_value == -1)
    660             m_assembler.cmp_r(0, reg);
    661         else
    662             m_assembler.tst_r(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
    663         m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
    664         m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    665     }
    666 
    // dest = ((word at address) & mask cond) ? 1 : 0, via scratch S1.
    void set32Test32(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        set32Test32(cond, ARMRegisters::S1, mask, dest);
    }
    672 
    // dest = ((byte at address) & mask cond) ? 1 : 0, via scratch S1.
    void set32Test8(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        set32Test32(cond, ARMRegisters::S1, mask, dest);
    }
    678 
    // dest = src + imm (non-flag-setting ADD; imm may go through scratch S0).
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
    683 
    // *address.m_ptr += imm. The absolute address is loaded twice (into S1
    // for the read, then S0 for the write) because S1 is clobbered by the
    // loaded value and add32 may use S0 internally.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
    692 
    // *address.m_ptr -= imm; same two-phase S1/S0 scheme as the
    // AbsoluteAddress add32 above.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
    701 
    // Load the word at an absolute address into dest, via scratch S0.
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }
    707 
    // Compare the word at an absolute address (loaded into scratch S1)
    // against a register.
    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    713 
    // Compare the word at an absolute address (loaded into scratch S1)
    // against an immediate.
    Jump branch32(Condition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
    719 
    // Jump table dispatch: pc += index << scale. The trailing MOV r0, r0 is
    // a NOP filling the slot the pc-relative add would otherwise prefetch.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // NOP the default prefetching
        m_assembler.mov_r(ARMRegisters::r0, ARMRegisters::r0);
    }
    728 
    // Emit a linkable (far) call; same ARMv5 BLX / pre-v5 prepared-jump
    // scheme as nearCall above.
    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }
    740 
    // A tail call is just an unconditional jump wrapped as a Call.
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }
    745 
    // Convert an already-emitted jump into a tail call.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
    750 
    // Load a pointer constant into dest via ldr_un_imm; the returned
    // DataLabelPtr marks the constant so the value can be patched later.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }
    757 
    // Compare left against a patchable pointer constant (loaded into S1) and
    // branch on cond; the branch is emitted with useConstantPool = true.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }
    764 
    // Compare the word at 'left' (loaded into S1) against a patchable
    // pointer constant (loaded into S0) and branch on cond.
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }
    772 
    // Store a patchable pointer constant (via S1) to [base + offset];
    // returns the label of the constant for later patching.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }
    779 
    // Convenience overload: store a patchable pointer whose initial value
    // is null.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
    784 
    // Floating point operators
    // All three capability queries report the runtime-detected presence of
    // a VFP unit.
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }
    790 
    // Double-to-int truncation (branchTruncateDoubleToInt32) also
    // requires VFP.
    bool supportsFloatingPointTruncate() const
    {
        return s_isVFPPresent;
    }
    795 
    // Hardware square root (sqrtDouble) also requires VFP.
    bool supportsFloatingPointSqrt() const
    {
        return s_isVFPPresent;
    }
    800 
    // Loads a double from [base + offset] into 'dest'.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }
    805 
    // Loads a double from an absolute address into 'dest'. The address
    // is first materialized in scratch register S0, then used as the
    // base of a VFP load with zero offset.
    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }
    811 
    // Stores the double in 'src' to [base + offset].
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }
    816 
    // dest += src (double precision).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64_r(dest, dest, src);
    }
    821 
    // dest += *src. Clobbers the scratch double register SD0.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }
    827 
    // dest /= src (double precision).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64_r(dest, dest, src);
    }
    832 
    // dest /= *src. Deliberately asserts: this path has never been
    // exercised by a caller, so it is flagged rather than trusted.
    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }
    839 
    // dest -= src (double precision).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64_r(dest, dest, src);
    }
    844 
    // dest -= *src. Clobbers the scratch double register SD0.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }
    850 
    // dest *= src (double precision).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64_r(dest, dest, src);
    }
    855 
    // dest *= *src. Clobbers the scratch double register SD0.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }
    861 
    // dest = sqrt(src) (double precision).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64_r(dest, src);
    }
    866 
    // Converts the 32-bit integer in 'src' to a double in 'dest'.
    // 'dest << 1' names the first single-precision register aliased onto
    // the double register (VFP: d<n> overlaps s<2n>/s<2n+1>); the raw
    // integer is moved there and then converted in place.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp_r(dest << 1, src);
        m_assembler.vcvt_f64_s32_r(dest, dest << 1);
    }
    872 
    // Converts the 32-bit integer at 'src' to a double in 'dest'.
    // Deliberately asserts: this path has never been exercised.
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds (a direct FP load) is not worth the effort here; go via an
        // integer scratch register instead.
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }
    880 
    // Converts the 32-bit integer at absolute address 'src' to a double
    // in 'dest'. Deliberately asserts: this path has never been
    // exercised.
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds (a direct FP load) is not worth the effort here.
        // Materialize the address in S1, load the word it points to back
        // into S1, then convert.
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }
    889 
    // Compares two doubles and returns a jump taken when 'cond' holds.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64_r(left, right);
        // Copy the VFP comparison flags into the ARM APSR so an ordinary
        // conditional branch can test them.
        m_assembler.vmrs_apsr();
        // For the "...OrUnordered" conditions: an unordered compare sets
        // V, so this cmp of S0 with itself — executed only on VS —
        // rewrites the flags to the "equal" state, making the branch
        // below also fire for NaN operands.
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }
    898 
    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN and INT_MAX).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvtr_s32_f64_r(ARMRegisters::SD0 << 1, src);
        // If VCVTR.S32.F64 can't fit the result into a 32-bit
        // integer, it saturates at INT_MAX or INT_MIN. Testing this is
        // probably quicker than testing FPSCR for exception.
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);
        // S0 = dest - INT_MIN: zero iff dest == INT_MIN, and (mod 2^32)
        // equal to -1 iff dest == INT_MAX — the two saturation values.
        // NOTE(review): the NotEqual predicate on the cmn relies on this
        // sub updating the APSR flags; confirm ARMAssembler::sub_r emits
        // the flag-setting variant.
        m_assembler.sub_r(ARMRegisters::S0, dest, ARMAssembler::getOp2(0x80000000));
        m_assembler.cmn_r(ARMRegisters::S0, ARMAssembler::getOp2(1), ARMCondition(NotEqual));
        // Taken exactly when the conversion saturated.
        return Jump(m_assembler.jmp(ARMCondition(Equal)));
    }
    914 
    // Converts 'src' to an integer, placing the result in 'dest'.
    // If the result is not representable as a 32 bit value, a failure
    // jump is appended to 'failureCases'.
    // May also fail for some values that are representable in 32 bits
    // (specifically, in this case, 0 — because it may have been -0.0).
    // NOTE(review): 'fpTemp' is unused; the scratch double SD0 is used
    // instead.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
        failureCases.append(branchTest32(Zero, dest));
    }
    931 
    // Branches when 'reg' compares not-equal to 0.0 (ordered compare, so
    // NaN does not take the branch). 'scratch' is overwritten with the
    // constant 0.0, built by converting integer zero from S0.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }
    938 
    // Branches when 'reg' equals 0.0 or is NaN (unordered). 'scratch' is
    // overwritten with the constant 0.0, built by converting integer
    // zero from S0.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
    945 
    946 protected:
    // Converts a MacroAssembler Condition to the ARMAssembler encoding;
    // the enum values are expected to match, so a cast suffices.
    ARMAssembler::Condition ARMCondition(Condition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }
    951 
    // Reserves room for 'insnSpace' bytes of instructions plus
    // 'constSpace' bytes of constants, so the instructions emitted next
    // cannot be split by a constant-pool flush.
    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }
    956 
    // Current size of the assembler's pending constant pool.
    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }
    961 
    // On pre-ARMv5 cores (no BLX) this primes the return address: it
    // copies pc into lr immediately before the caller emits a load into
    // pc that performs the jump. Reading pc yields the current
    // instruction's address + 8, i.e. the instruction after that
    // single-instruction jump — exactly the return address. ensureSpace
    // keeps the mov and the following load from being split by a
    // constant-pool flush. On ARMv5+ this is a no-op (BLX sets lr).
    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }
    970 
    // Emits a call through a function pointer loaded from [base + offset].
    // On ARMv5+ the pointer is loaded into scratch register S1 and called
    // with BLX; on older cores it is loaded straight into pc, with
    // prepareCall() priming lr immediately beforehand (so prepareCall
    // must always directly precede the load into targetReg).
    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        // NOTE(review): presumably compensates for a word the call
        // sequence places on the stack when reading an sp-relative slot —
        // confirm against call32's callers.
        if (base == ARMRegisters::sp)
            offset += 4;

        // Three strategies by offset magnitude:
        //  - fits the 12-bit LDR immediate field: load directly;
        //  - fits 20 bits: fold bits 12..19 into the base with an
        //    add/sub (OP2_IMM with rotate field 10 encodes
        //    (offset >> 12) << 12), then load with the low 12 bits;
        //  - otherwise: materialize the full offset in tmpReg and use a
        //    register-offset load.
        // Negative offsets mirror the same cases with the "down" (dtr_d)
        // forms.
        if (offset >= 0) {
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, tmpReg);
            }
        } else  {
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, tmpReg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }
   1015 
   1016 private:
   1017     friend class LinkBuffer;
   1018     friend class RepatchBuffer;
   1019 
    // Used by LinkBuffer during finalization to bind a recorded call to
    // its target function.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }
   1024 
    // Re-points an already-linked call at a new code label (used by
    // RepatchBuffer).
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
   1029 
    // Re-points an already-linked call at a new C++ function (used by
    // RepatchBuffer).
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
   1034 
   1035     static const bool s_isVFPPresent;
   1036 };
   1037 
   1038 }
   1039 
   1040 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
   1041 
   1042 #endif // MacroAssemblerARM_h
   1043