/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

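// Byte distance between the end of the patchable 64-bit immediate emitted by
// moveWithPatch() and the call/jump label that follows it: call r11 and
// jmp r11 each encode to three bytes (REX.B prefix, opcode, ModRM), so
// stepping back by this offset from the call location reaches the pointer
// that LinkBuffer and RepatchBuffer rewrite.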
#define REPTACH_OFFSET_CALL_R11 3

namespace JSC {

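// x86-64 specialization of the macro assembler. It layers pointer-width
// (64-bit) operations and absolute-address forms on top of
// MacroAssemblerX86Common, materializing 64-bit constants and addresses in a
// dedicated scratch register where the instruction encodings cannot take them
// directly.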
class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
protected:
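    // r11 is used to synthesize 64-bit immediates and absolute addresses.
    // (Assumption: callers treat r11 as freely clobberable; it is not handed
    // out as a general-purpose or argument register by the JIT.)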
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;

public:
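    // Pointers are eight bytes wide on x86-64, so BaseIndex accesses to
    // pointer-sized elements scale the index register by eight.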
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

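    // 32-bit read-modify-write operations on absolute addresses. The ALU
    // instructions cannot encode a 64-bit absolute address, so the address is
    // first loaded into the scratch register and the operation is performed
    // through a register-indirect form.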
    void add32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void or32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

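    // Loads from a 64-bit absolute address use the moffs form of MOV, which is
    // only available with eax/rax as the destination. When dest is not eax the
    // code parks eax's old value in dest, loads into eax, then swaps, leaving
    // eax intact and the loaded value in dest.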
    void load32(void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86Registers::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86Registers::eax, dest);
        }
    }

    void loadDouble(void* address, FPRegisterID dest)
    {
        move(ImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
    }

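    // Note: this reads the int32 at src.m_ptr at code-generation time and
    // embeds it as an immediate; it does not emit a load, so later changes to
    // the memory at that address are not observed by the generated code.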
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        move(Imm32(*static_cast<int32_t*>(src.m_ptr)), scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

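    // As with load32(void*, ...), the moffs store form requires eax, so eax is
    // saved in the scratch register around the store.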
    void store32(Imm32 imm, void* address)
    {
        move(X86Registers::eax, scratchRegister);
        move(imm, X86Registers::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86Registers::eax);
    }

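    // Calls and tail calls to patchable targets load the destination into r11
    // with a patchable 64-bit move and then call/jump through the register, so
    // the target can later be rewritten in place (see REPTACH_OFFSET_CALL_R11).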
    Call call()
    {
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
        ASSERT(differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }


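    // Pointer-width (quadword) arithmetic and logic. Imm32 operands are
    // sign-extended to 64 bits by the quadword instruction forms; full ImmPtr
    // operands are first materialized in the scratch register.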
    void addPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void addPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void addPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.addq_rr(scratchRegister, dest);
    }

    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void addPtr(Imm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void addPtr(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        addPtr(imm, Address(scratchRegister));
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void andPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void orPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.orq_rr(scratchRegister, dest);
    }

    void orPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.subq_ir(imm.m_value, dest);
    }

    void subPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.subq_rr(scratchRegister, dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xorPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }


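    // Pointer-width loads and stores. Absolute-address forms use the rax moffs
    // encodings, juggling rax around the access as in load32/store32 above;
    // the *WithAddressOffsetPatch variants force a 32-bit displacement so the
    // offset can be patched after assembly.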
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadPtr(void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(X86Registers::eax, dest);
            m_assembler.movq_mEAX(address);
            swap(X86Registers::eax, dest);
        }
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storePtr(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            swap(X86Registers::eax, src);
            m_assembler.movq_EAXm(address);
            swap(X86Registers::eax, src);
        }
    }

    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void movePtrToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

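    // Pointer-width compares and branches. CMP only accepts a sign-extended
    // 32-bit immediate, so ImmPtr operands are moved into the scratch register
    // before comparing.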
    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        move(ImmPtr(left.m_ptr), scratchRegister);
        return branchPtr(cond, Address(scratchRegister), right);
    }

    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, Address left, ImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        addPtr(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        subPtr(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

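    // Emits the full 10-byte movq with a 64-bit immediate (REX.W + B8+r +
    // imm64), so the constant occupies a fixed position that LinkBuffer and
    // RepatchBuffer can overwrite later.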
    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        storePtr(scratchRegister, address);
        return label;
    }

    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        loadPtr(address, dest);
        return label;
    }

    bool supportsFloatingPoint() const { return true; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    bool supportsFloatingPointTruncate() const { return true; }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

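    // For the patchable (non-near) calls produced by call() above, the value
    // to link or repatch is the 64-bit immediate of the movq into r11, located
    // REPTACH_OFFSET_CALL_R11 bytes before the call's label; near calls are
    // linked as ordinary relative calls.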
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPTACH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)

#endif // MacroAssemblerX86_64_h