/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers

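// Rewrites the placeholder LDR emitted for a constant-pool load once the pool's final
// position is known. The low 12 bits of the placeholder hold the pool slot index
// (stored shifted left by one); they are replaced with the real PC-relative byte
// offset, accounting for the ARM PC reading two words ahead of the instruction.
// When the entry ends up immediately after the instruction (a -4 byte displacement),
// the DT_UP (add) bit is cleared so the 4-byte offset is subtracted instead.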
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
}

// Handle immediates

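// getOp2 tries to encode a 32-bit constant as an ARM data-processing operand 2:
// an 8-bit value rotated right by an even amount (0-30). It returns the encoded
// field (tagged with OP2_IMM) on success, or INVALID_IMM if the constant cannot
// be represented that way.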
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return OP2_IMM | imm;

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return OP2_IMM | (imm >> 24) | (rol << 8);

    return INVALID_IMM;
}

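// genInt tries to synthesize a 32-bit constant with two data-processing instructions
// by splitting it into two 8-bit chunks that each fit an operand-2 rotation:
// MOV + ORR when 'positive' is true, otherwise MVN + BIC on the complemented value.
// Returns 1 on success and 0 if the constant needs the literal-pool fallback.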
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: Search for the non-immediate part of the constant.
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // Rotate left by 8 and retry.
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask >>= 2;
                if (mask & 0x3)
                    return 0;
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);

    if ((imm & 0xff000000) == 0) {
        imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    if (positive) {
        mov_r(reg, imm1);
        orr_r(reg, reg, imm2);
    } else {
        mvn_r(reg, imm1);
        bic_r(reg, reg, imm2);
    }

    return 1;
}

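// getImm returns an operand-2 encoding of 'imm' when one exists (directly, or via the
// bitwise complement when 'invert' lets the caller flip the operation, signalled by
// OP2_INV_IMM). Otherwise the constant is materialized into tmpReg and the register
// number is returned.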
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Try to encode the immediate in a single instruction.
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        if (invert)
            return tmp | OP2_INV_IMM;
        mvn_r(tmpReg, tmp);
        return tmpReg;
    }

    return encodeComplexImm(imm, tmpReg);
}

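// moveImm loads a 32-bit constant into 'dest', preferring a single MOV (or MVN of the
// complement) and falling back to encodeComplexImm for harder constants.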
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Try to encode the immediate in a single instruction.
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM) {
        mov_r(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        mvn_r(dest, tmp);
        return;
    }

    encodeComplexImm(imm, dest);
}

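// encodeComplexImm materializes a constant that has no single-instruction encoding.
// On ARMv7 it uses MOVW, or a MOVW/MOVT pair; on older cores it tries the
// two-instruction sequences generated by genInt and finally falls back to a
// constant-pool load.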
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_AT_LEAST(7)
    ARMWord tmp = getImm16Op2(imm);
    if (tmp != INVALID_IMM) {
        movw_r(dest, tmp);
        return dest;
    }
    movw_r(dest, getImm16Op2(imm & 0xffff));
    movt_r(dest, getImm16Op2(imm >> 16));
    return dest;
#else
    // Try to build the constant with two instructions.
    if (genInt(dest, imm, true))
        return dest;
    if (genInt(dest, ~imm, false))
        return dest;

    ldr_imm(dest, imm);
    return dest;
#endif
}

// Memory load/store helpers

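// dataTransfer32 emits a 32-bit (or byte) load/store of srcDst at base + offset.
// Offsets that fit in 12 bits use a single transfer; offsets up to 20 bits first add
// or subtract the upper byte into the scratch register S0 (the "| (10 << 8)" operand-2
// rotation places that byte at bit 12); anything larger loads the full offset into S0
// and uses register-offset addressing.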
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes)
{
    ARMWord transferFlag = bytes ? DT_BYTE : 0;
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtr_u(isLoad, srcDst, base, offset | transferFlag);
        else if (offset <= 0xfffff) {
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
        }
    } else {
        offset = -offset;
        if (offset <= 0xfff)
            dtr_d(isLoad, srcDst, base, offset | transferFlag);
        else if (offset <= 0xfffff) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_d(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtr_dr(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
        }
    }
}

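// baseIndexTransfer32 emits a load/store of srcDst at base + (index << scale) + offset.
// The scaled index is folded into an ADD targeting S0; small offsets are applied
// directly by the transfer, while large offsets are loaded from the constant pool and
// added to the scaled index before a register-offset transfer.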
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    ARMWord op2;

    ASSERT(scale >= 0 && scale <= 3);
    op2 = lsl(index, scale);

    if (offset >= 0 && offset <= 0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
        return;
    }
    if (offset <= 0 && offset >= -0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
        return;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
}

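// doubleTransfer emits a VFP double load/store. FDTR encodes its offset in words, so
// the fast paths only apply to four-byte-aligned offsets: up to +/-0x3ff directly, and
// up to +/-0x3ffff with an ADD/SUB of the upper bits into S0. Unaligned or larger
// offsets compute the full address in S0 and transfer with a zero displacement.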
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
    // VFP transfers cannot directly address memory that is not four-byte-aligned.
    if (!(offset & 0x3)) {
        if (offset <= 0x3ff && offset >= 0) {
            fdtr_u(isLoad, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
            fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;

        if (offset <= 0x3ff && offset >= 0) {
            fdtr_d(isLoad, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
            fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, base);
    fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}

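// executableCopy flushes the pending constant pool, copies the generated code into
// executable memory, and fixes up the recorded jumps: branches whose targets are in
// range are rewritten as direct B instructions, while the rest have the absolute
// target address written into their constant-pool slot.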
void* ARMAssembler::executableCopy(ExecutablePool* allocator)
{
    // 64-bit alignment is required for the next constant pool and the JIT code as well.
    m_buffer.flushWithoutBarrier(true);
    if (m_buffer.uncheckedSize() & 0x7)
        bkpt(0);

    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The least significant bit is set if the constant must be placed in the constant pool.
        int pos = (*iter) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != InvalidBranchTarget) {
            if (!(*iter & 1)) {
                int diff = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);

                if (diff <= BOFFSET_MAX && diff >= BOFFSET_MIN) {
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
                    continue;
                }
            }
            *addr = reinterpret_cast<ARMWord>(data + *addr);
        }
    }

    return data;
}

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)