Home | History | Annotate | Download | only in assembler
      1 /*
      2  * Copyright (C) 2008 Apple Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions
      6  * are met:
      7  * 1. Redistributions of source code must retain the above copyright
      8  *    notice, this list of conditions and the following disclaimer.
      9  * 2. Redistributions in binary form must reproduce the above copyright
     10  *    notice, this list of conditions and the following disclaimer in the
     11  *    documentation and/or other materials provided with the distribution.
     12  *
     13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
     14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
     17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
     21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     24  */
     25 
     26 #ifndef X86Assembler_h
     27 #define X86Assembler_h
     28 
     29 #include <wtf/Platform.h>
     30 
     31 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
     32 
     33 #include "AssemblerBuffer.h"
     34 #include <stdint.h>
     35 #include <wtf/Assertions.h>
     36 #include <wtf/Vector.h>
     37 
     38 namespace JSC {
     39 
     40 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
     41 
namespace X86Registers {
    // General-purpose registers. Enumerator order matches the hardware
    // register numbering (eax = 0 ... edi = 7, with r8-r15 following on
    // x86-64), so a RegisterID can be used directly in instruction encoding.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers, likewise numbered to match their hardware encoding.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
     76 
     77 class X86Assembler {
     78 public:
    typedef X86Registers::RegisterID RegisterID;
    typedef X86Registers::XMMRegisterID XMMRegisterID;
    // Floating-point values live in the SSE xmm register file.
    typedef XMMRegisterID FPRegisterID;
     82 
    // x86 condition codes, in hardware encoding order. A Condition is added
    // directly to an opcode base to form the jcc/setcc instructions (see
    // jccRel32() and setccOpcode()), so the enumerator values must match the
    // ISA's condition-code numbering.
    typedef enum {
        ConditionO,   // overflow
        ConditionNO,  // no overflow
        ConditionB,   // below (unsigned <)
        ConditionAE,  // above or equal (unsigned >=)
        ConditionE,   // equal
        ConditionNE,  // not equal
        ConditionBE,  // below or equal (unsigned <=)
        ConditionA,   // above (unsigned >)
        ConditionS,   // sign set (negative)
        ConditionNS,  // sign clear
        ConditionP,   // parity even
        ConditionNP,  // parity odd
        ConditionL,   // less (signed <)
        ConditionGE,  // greater or equal (signed >=)
        ConditionLE,  // less or equal (signed <=)
        ConditionG,   // greater (signed >)

        ConditionC  = ConditionB,   // carry set is the same encoding as below
        ConditionNC = ConditionAE,  // carry clear is the same encoding as above-or-equal
    } Condition;
    104 
    105 private:
    // One-byte x86 opcodes, named using the Intel operand-encoding
    // conventions: Ev/Eb = ModRM r/m operand (dword/byte), Gv = ModRM reg
    // operand, Ib = 8-bit immediate, Iz = 32-bit immediate (16-bit when the
    // operand-size prefix is in effect — see cmpw_im), Ov = absolute
    // address. "GROUPn" opcodes select the actual operation through the
    // ModRM reg field (see GroupOpcodeID below). PRE_* entries are
    // instruction prefixes rather than opcodes.
    typedef enum {
        OP_ADD_EvGv                     = 0x01,
        OP_ADD_GvEv                     = 0x03,
        OP_OR_EvGv                      = 0x09,
        OP_OR_GvEv                      = 0x0B,
        OP_2BYTE_ESCAPE                 = 0x0F,
        OP_AND_EvGv                     = 0x21,
        OP_AND_GvEv                     = 0x23,
        OP_SUB_EvGv                     = 0x29,
        OP_SUB_GvEv                     = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
        OP_XOR_EvGv                     = 0x31,
        OP_XOR_GvEv                     = 0x33,
        OP_CMP_EvGv                     = 0x39,
        OP_CMP_GvEv                     = 0x3B,
#if CPU(X86_64)
        PRE_REX                         = 0x40,
#endif
        OP_PUSH_EAX                     = 0x50,
        OP_POP_EAX                      = 0x58,
#if CPU(X86_64)
        OP_MOVSXD_GvEv                  = 0x63,
#endif
        PRE_OPERAND_SIZE                = 0x66, // same byte doubles as the SSE 0x66 prefix below
        PRE_SSE_66                      = 0x66,
        OP_PUSH_Iz                      = 0x68,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EvIz                  = 0x81,
        OP_GROUP1_EvIb                  = 0x83,
        OP_TEST_EvGv                    = 0x85,
        OP_XCHG_EvGv                    = 0x87,
        OP_MOV_EvGv                     = 0x89,
        OP_MOV_GvEv                     = 0x8B,
        OP_LEA                          = 0x8D,
        OP_GROUP1A_Ev                   = 0x8F,
        OP_CDQ                          = 0x99,
        OP_MOV_EAXOv                    = 0xA1,
        OP_MOV_OvEAX                    = 0xA3,
        OP_MOV_EAXIv                    = 0xB8,
        OP_GROUP2_EvIb                  = 0xC1,
        OP_RET                          = 0xC3,
        OP_GROUP11_EvIz                 = 0xC7,
        OP_INT3                         = 0xCC,
        OP_GROUP2_Ev1                   = 0xD1, // shift-by-one forms; no immediate byte
        OP_GROUP2_EvCL                  = 0xD3, // shift-by-cl forms
        OP_CALL_rel32                   = 0xE8,
        OP_JMP_rel32                    = 0xE9,
        PRE_SSE_F2                      = 0xF2,
        OP_HLT                          = 0xF4,
        OP_GROUP3_EbIb                  = 0xF6,
        OP_GROUP3_Ev                    = 0xF7,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev                    = 0xFF,
    } OneByteOpcodeID;
    160 
    // Opcodes in the two-byte map, emitted after the 0x0F escape byte
    // (OP_2BYTE_ESCAPE above). OP2_JCC_rel32 and OP_SETCC are bases to which
    // a Condition is added (see jccRel32()/setccOpcode()); OP_SETCC keeps
    // its historical OP_ (rather than OP2_) name.
    typedef enum {
        OP2_MOVSD_VsdWsd    = 0x10,
        OP2_MOVSD_WsdVsd    = 0x11,
        OP2_CVTSI2SD_VsdEd  = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd  = 0x2E,
        OP2_ADDSD_VsdWsd    = 0x58,
        OP2_MULSD_VsdWsd    = 0x59,
        OP2_SUBSD_VsdWsd    = 0x5C,
        OP2_DIVSD_VsdWsd    = 0x5E,
        OP2_XORPD_VpdWpd    = 0x57,
        OP2_MOVD_VdEd       = 0x6E,
        OP2_MOVD_EdVd       = 0x7E,
        OP2_JCC_rel32       = 0x80,
        OP_SETCC            = 0x90,
        OP2_IMUL_GvEv       = 0xAF,
        OP2_MOVZX_GvEb      = 0xB6,
        OP2_MOVZX_GvEw      = 0xB7,
        OP2_PEXTRW_GdUdIb   = 0xC5,
    } TwoByteOpcodeID;
    181 
    182     TwoByteOpcodeID jccRel32(Condition cond)
    183     {
    184         return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
    185     }
    186 
    187     TwoByteOpcodeID setccOpcode(Condition cond)
    188     {
    189         return (TwoByteOpcodeID)(OP_SETCC + cond);
    190     }
    191 
    // Sub-opcodes placed in the ModRM reg field to select the operation for
    // the GROUPn opcodes above. The groups overlap numerically, so a value
    // is only meaningful together with its group's opcode byte.
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR  = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT  = 2,
        GROUP3_OP_NEG  = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN  = 4,
        GROUP5_OP_PUSH  = 6,

        GROUP11_MOV = 0,
    } GroupOpcodeID;
    217 
    218     class X86InstructionFormatter;
    219 public:
    220 
    // Records the location of an emitted jump or call so it can later be
    // linked to its target. m_offset is a byte offset into the assembler
    // buffer; -1 marks a default-constructed, not-yet-valid source.
    class JmpSrc {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpSrc()
            : m_offset(-1)
        {
        }

    private:
        // Only the assembler/formatter may construct a real (offset-bearing) JmpSrc.
        JmpSrc(int offset)
            : m_offset(offset)
        {
        }

        int m_offset;
    };
    238 
    // A potential jump target: a byte offset into the assembler buffer plus
    // a flag recording whether any jump actually refers to it. The offset is
    // packed into 31 bits so the flag fits in the same word; -1 marks a
    // default-constructed, not-yet-valid destination.
    class JmpDst {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpDst()
            : m_offset(-1)
            , m_used(false)
        {
        }

        bool isUsed() const { return m_used; }
        void used() { m_used = true; }
    private:
        JmpDst(int offset)
            : m_offset(offset)
            , m_used(false)
        {
            // Verify the offset survived truncation into the 31-bit field.
            ASSERT(m_offset == offset);
        }

        int m_offset : 31;
        bool m_used : 1;
    };
    262 
    X86Assembler()
    {
    }

    // Number of bytes of machine code emitted so far.
    size_t size() const { return m_formatter.size(); }
    268 
    // Stack operations:

    // push reg (OP_PUSH_EAX = 0x50 is the base of the push-register family)
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    // pop reg (OP_POP_EAX = 0x58 is the base of the pop-register family)
    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    // push a 32-bit immediate
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    // push dword [base + offset]
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    // pop dword [base + offset]
    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
    296 
    // Arithmetic operations:
    //
    // The *_ir/*_im immediate forms below all share one pattern: when the
    // immediate fits in a signed byte they emit the sign-extended 8-bit
    // encoding (GROUP1_EvIb), otherwise the full 32-bit one (GROUP1_EvIz).

#if !CPU(X86_64)
    // adc imm -> [addr] (add-with-carry to an absolute address; x86-32 only)
    void adcl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    311 
    // 32-bit add in register/memory/immediate addressing forms.

    // add dst, src
    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    // add dst, [base + offset]
    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    // add [base + offset], src
    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    // add dst, imm (8-bit immediate form when imm fits)
    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    // add [base + offset], imm
    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    348 
#if CPU(X86_64)
    // 64-bit (quad-word) adds, emitted through the formatter's 64-bit
    // operand path. Same 8/32-bit immediate-size selection as addl_*.
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // add [addr], imm (absolute address form; x86-32 only)
    void addl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    388 
    // 32-bit bitwise and; same addressing/immediate patterns as addl_*.

    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    // and dst, [base + offset]
    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    // and [base + offset], src
    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    425 
#if CPU(X86_64)
    // 64-bit bitwise and.
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // and [addr], imm (absolute address form; x86-32 only)
    void andl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    454 
    // neg (two's-complement negate) of a register / memory dword.
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    // not (bitwise complement) of a register / memory dword.
    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
    474 
    // 32-bit bitwise or; same addressing/immediate patterns as addl_*.

    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    // or dst, [base + offset]
    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    // or [base + offset], src
    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    511 
#if CPU(X86_64)
    // 64-bit bitwise or.
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // or [addr], imm (absolute address form; x86-32 only)
    void orl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    540 
    // 32-bit subtract; same addressing/immediate patterns as addl_*.

    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    // sub dst, [base + offset]
    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    // sub [base + offset], src
    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    577 
#if CPU(X86_64)
    // 64-bit subtract.
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // sub [addr], imm (absolute address form; x86-32 only)
    void subl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    606 
    // 32-bit bitwise xor; same addressing/immediate patterns as addl_*.

    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    // xor dst, [base + offset]
    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    // xor [base + offset], src
    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
    643 
#if CPU(X86_64)
    // 64-bit bitwise xor.
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
    661 
    // sar dst, imm (arithmetic right shift). A shift by 1 uses the
    // dedicated shift-by-one opcode (OP_GROUP2_Ev1), which carries no
    // immediate byte, saving one byte of encoding.
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    // sar dst, cl
    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    // shl dst, imm — same shift-by-one encoding optimization as sarl_i8r.
    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    // shl dst, cl
    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }
    691 
#if CPU(X86_64)
    // 64-bit arithmetic right shifts.
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        // Shift-by-one gets the immediate-free opcode, as in sarl_i8r.
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
    708 
    // imul dst, src (two-operand signed multiply; lives in the two-byte map)
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    // imul dst, [base + offset]
    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    // imul dst, src, imm32 (three-operand form with a 32-bit immediate)
    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    // idiv dst — per the x86 ISA the dividend is implicitly edx:eax.
    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
    729 
    // Comparisons:

    // cmp dst, src
    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    // cmp [base + offset], src
    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    // cmp src, [base + offset]
    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    // cmp dst, imm (8-bit immediate form when imm fits)
    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    // As cmpl_ir, but always emits the 32-bit immediate form, so the
    // instruction has a fixed length regardless of the immediate value
    // (presumably so callers can patch the immediate later — confirm
    // against call sites).
    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    // cmp [base + offset], imm
    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // cmp [base + index * scale + offset], imm
    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    // Fixed-length (always 32-bit immediate) form of cmpl_im; see
    // cmpl_ir_force32 for the rationale.
    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }
    791 
#if CPU(X86_64)
    // 64-bit comparisons, mirroring the cmpl_* forms above.
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // cmp [base + index * scale + offset], imm
    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // Absolute-address comparison forms (x86-32 only).
    void cmpl_rm(RegisterID reg, void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    857 
    // 16-bit compares: the operand-size prefix (0x66) switches the dword
    // opcodes to word operands.
    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            // With the operand-size prefix, the Iz immediate is 16 bits.
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
    876 
    // 32-bit TEST: register/register form.
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // 32-bit TEST of an immediate against a register.
    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    // 32-bit TEST of an immediate against memory (base + offset).
    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    // 32-bit TEST of an immediate against memory (base + index * scale + offset).
    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    899 
    900 #if CPU(X86_64)
    // 64-bit TEST: register/register form (REX.W-prefixed by oneByteOp64).
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    // 64-bit TEST of a 32-bit immediate against a register.
    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    // 64-bit TEST of a 32-bit immediate against memory (base + offset).
    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    // 64-bit TEST of a 32-bit immediate against memory (base + index * scale + offset).
    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    923 #endif
    924 
    // 16-bit TEST, register/register (operand-size prefix narrows to 16 bits).
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // 8-bit TEST of an immediate against a register (byte-register form).
    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }
    936 
    // SETcc: set the low byte of dst to 0/1 according to an arbitrary condition.
    // The reg field of the ModRM byte is unused by SETcc, hence the zero group opcode.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    // SETE: set dst's low byte if the zero flag is set (equal).
    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    // SETZ is a mnemonic alias of SETE.
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    // SETNE: set dst's low byte if the zero flag is clear (not equal).
    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    // SETNZ is a mnemonic alias of SETNE.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
    961 
    962     // Various move ops:
    963 
    // CDQ: sign-extend EAX into EDX:EAX (used before 32-bit signed division).
    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }

    // XCHG: swap two 32-bit registers.
    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if CPU(X86_64)
    // XCHG: swap two 64-bit registers (REX.W-prefixed).
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif
    980 
    // 32-bit MOV, register to register.
    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    // 32-bit MOV, register to memory (base + offset).
    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    // As movl_rm, but forces a 32-bit displacement encoding (needed when the
    // offset field will be patched later and must have a fixed width).
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    // 32-bit MOV, register to memory (base + index * scale + offset).
    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }
   1000 
    // MOV EAX from an absolute address (the EAX/moffs short form). The address
    // immediate is pointer-sized, so its width differs between targets.
    void movl_mEAX(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
   1010 
    // 32-bit MOV, memory (base + offset) to register.
    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    // As movl_mr, but forces a 32-bit displacement (for later patching).
    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    // 32-bit MOV, memory (base + index * scale + offset) to register.
    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // MOV of a 32-bit immediate into a register (opcode B8+r short form).
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }
   1031 
    // MOV of a 32-bit immediate into memory (base + offset).
    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // MOV EAX to an absolute address (the EAX/moffs short form); the address
    // immediate is pointer-sized, hence the per-target width.
    void movl_EAXm(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
   1047 
   1048 #if CPU(X86_64)
    // 64-bit MOV, register to register.
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    // 64-bit MOV, register to memory (base + offset).
    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    // As movq_rm, but forces a 32-bit displacement (for later patching).
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    // 64-bit MOV, register to memory (base + index * scale + offset).
    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // 64-bit MOV RAX from an absolute 64-bit address (moffs form).
    void movq_mEAX(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    // 64-bit MOV RAX to an absolute 64-bit address (moffs form).
    void movq_EAXm(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    // 64-bit MOV, memory (base + offset) to register.
    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    // As movq_mr, but forces a 32-bit displacement (for later patching).
    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    // 64-bit MOV, memory (base + index * scale + offset) to register.
    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // 64-bit MOV of a sign-extended 32-bit immediate into memory (base + offset).
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // MOV of a full 64-bit immediate into a register (REX.W + B8+r form).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    // MOVSXD: sign-extend a 32-bit register into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
   1112 
   1113 
   1114 #else
    // 32-bit MOV, register to an absolute address; uses the shorter EAX/moffs
    // encoding when the source register is EAX.
    void movl_rm(RegisterID src, void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    // 32-bit MOV, absolute address to register; uses the shorter EAX/moffs
    // encoding when the destination register is EAX.
    void movl_mr(void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    // MOV of a 32-bit immediate to an absolute address.
    void movl_i32m(int imm, void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
   1136 #endif
   1137 
    // MOVZX: zero-extend a 16-bit memory operand (base + offset) into a register.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    // MOVZX: zero-extend a 16-bit memory operand (base + index * scale + offset).
    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    // MOVZX: zero-extend the low byte of src into dst.
    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX).  Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }
   1155 
    // LEA: load the effective address (base + offset) into a 32-bit register.
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
#if CPU(X86_64)
    // LEA: load the effective address (base + offset) into a 64-bit register.
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
   1165 #endif
   1166 
   1167     // Flow control:
   1168 
    // CALL with a rel32 target to be filled in by the linker; returns a JmpSrc
    // recording where the relocation lives.
    JmpSrc call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect CALL through a register; the JmpSrc marks the return address.
    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }

    // Indirect CALL through memory (base + offset).
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    // JMP with a rel32 target to be filled in by the linker.
    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return a JmpSrc so we have a label to the jump, so we can use this
    // To make a tail recursive call on x86-64.  The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    JmpSrc jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return JmpSrc(m_formatter.size());
    }

    // Indirect JMP through memory (base + offset).
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
   1205 
    // Conditional jumps (Jcc, rel32 form). Each emits the two-byte 0F 8x opcode
    // and returns a JmpSrc recording the location of the rel32 to be linked.

    // Jump if not equal (ZF clear).
    JmpSrc jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    // JNZ is a mnemonic alias of JNE.
    JmpSrc jnz()
    {
        return jne();
    }

    // Jump if equal (ZF set).
    JmpSrc je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    // JZ is a mnemonic alias of JE.
    JmpSrc jz()
    {
        return je();
    }

    // Jump if less (signed).
    JmpSrc jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    // Jump if below (unsigned).
    JmpSrc jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    // Jump if less or equal (signed).
    JmpSrc jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    // Jump if below or equal (unsigned).
    JmpSrc jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    // Jump if greater or equal (signed).
    JmpSrc jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    // Jump if greater (signed).
    JmpSrc jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    // Jump if above (unsigned).
    JmpSrc ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    // Jump if above or equal (unsigned).
    JmpSrc jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    // Jump if overflow.
    JmpSrc jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    // Jump if parity.
    JmpSrc jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    // Jump if sign.
    JmpSrc js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }

    // Jump on an arbitrary condition code.
    JmpSrc jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
   1299 
   1300     // SSE operations:
   1301 
    // ADDSD: scalar double add, XMM register source.
    // (The F2 prefix selects the scalar-double form of the SSE opcode.)
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // ADDSD: scalar double add, memory source (base + offset).
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // CVTSI2SD: convert a signed integer register to scalar double.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // CVTSI2SD: convert a signed integer in memory (base + offset) to scalar double.
    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }
   1325 
   1326 #if !CPU(X86_64)
    // CVTSI2SD: convert a signed integer at an absolute address (32-bit only).
    void cvtsi2sd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
   1332 #endif
   1333 
    // CVTTSD2SI: truncating conversion of a scalar double to a signed integer register.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // MOVD: move the low 32 bits of an XMM register to a general register.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

#if CPU(X86_64)
    // MOVQ: move the low 64 bits of an XMM register to a general register (REX.W form).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // MOVQ: move a 64-bit general register into an XMM register (REX.W form).
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
   1358 #endif
   1359 
    // MOVSD: store a scalar double from an XMM register to memory (base + offset).
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    // MOVSD: load a scalar double from memory (base + offset) into an XMM register.
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    // MOVSD: load a scalar double from an absolute address (32-bit only).
    void movsd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
   1378 #endif
   1379 
    // MULSD: scalar double multiply, XMM register source.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // MULSD: scalar double multiply, memory source (base + offset).
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // PEXTRW: extract the 16-bit word selected by whichWord from an XMM
    // register into a general register.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // SUBSD: scalar double subtract, XMM register source.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // SUBSD: scalar double subtract, memory source (base + offset).
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1410 
    // UCOMISD: unordered scalar double compare, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // UCOMISD: unordered scalar double compare against memory (base + offset).
    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // DIVSD: scalar double divide, XMM register source.
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // DIVSD: scalar double divide, memory source (base + offset).
    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // XORPD: bitwise XOR of packed doubles (commonly used to zero or negate).
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }
   1440 
   1441     // Misc instructions:
   1442 
    // INT3: software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    // RET: near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    // Plant the branch-not-taken hint prefix before a conditional branch.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
   1457 
   1458     // Assembler admin methods:
   1459 
    // Return a label (JmpDst) at the current end of the buffer.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }

    // Turn an existing jump source into a destination label, optionally offset.
    static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
    {
        return JmpDst(jump.m_offset + offset);
    }

    // Pad with HLT bytes until the buffer is aligned, then return a label there.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
   1477 
   1478     // Linking & patching:
   1479     //
   1480     // 'link' and 'patch' methods are for use on unprotected code - such as the code
   1481     // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
   1482     // code has been finalized it is (platform support permitting) within a non-
   1483     // writable region of memory; to modify the code in an execute-only execuable
   1484     // pool the 'repatch' and 'relink' methods should be used.
   1485 
    // Link a jump within the assembler's own buffer: write the rel32 at 'from'
    // so that it targets 'to'.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(from.m_offset != -1);
        ASSERT(to.m_offset != -1);

        char* code = reinterpret_cast<char*>(m_formatter.data());
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    // Link a jump in already-copied (but still writable) code to an absolute target.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Link a call in already-copied code to an absolute target.
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Write an absolute pointer at a previously recorded location.
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        ASSERT(where.m_offset != -1);

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    // Retarget an existing jump in finalized code.
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Retarget an existing call in finalized code.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Overwrite a previously planted 32-bit immediate.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    // Overwrite a previously planted pointer-sized immediate.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }
   1535 
    // Rewrite a pointer-load instruction in place into an LEA by overwriting
    // its opcode byte, turning "load the value at the address" into "compute
    // the address itself".
    static void repatchLoadPtrToLEA(void* where)
    {
#if CPU(X86_64)
        // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
        // Skip over the prefix byte.
        where = reinterpret_cast<char*>(where) + 1;
#endif
        *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
    }
   1545 
    // Offset of the instruction following a call (i.e. its return address)
    // within the generated code.
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }

    // Absolute address of a jump source once code has been copied to 'code'.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    // Absolute address of a label once code has been copied to 'code'.
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    // Byte distances between recorded buffer positions, in each src/dst pairing.
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
   1580 
    // Copy the assembled code into executable memory obtained from 'allocator'
    // and return the copy's address.
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
   1587 
   1588 private:
   1589 
    // Store a pointer immediately BEFORE 'where' ('where' points one past the
    // end of the pointer field, as recorded by the immediate emitters).
    static void setPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }

    // Store a 32-bit value immediately before 'where' (same end-of-field convention).
    static void setInt32(void* where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    // Write a rel32 displacement so that the instruction ending at 'from'
    // transfers control to 'to'. The offset must fit in 32 bits.
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
   1607 
   1608     class X86InstructionFormatter {
   1609 
   1610         static const int maxInstructionSize = 16;
   1611 
   1612     public:
   1613 
   1614         // Legacy prefix bytes:
   1615         //
   1616         // These are emmitted prior to the instruction.
   1617 
        // Plant a single legacy prefix byte (e.g. operand-size or SSE prefix)
        // ahead of the instruction that follows.
        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
   1622 
   1623         // Word-sized operands / no operand instruction formatters.
   1624         //
   1625         // In addition to the opcode, the following operand permutations are supported:
   1626         //   * None - instruction takes no operands.
   1627         //   * One register - the low three bits of the RegisterID are added into the opcode.
   1628         //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
   1629         //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
   1630         //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
   1631         //
   1632         // For 32-bit x86 targets, the address operand may also be provided as a void*.
   1633         // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
   1634         //
   1635         // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
   1636 
        // Opcode only; no operands.
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        // Register encoded in the opcode's low three bits (e.g. PUSH r, B8+r MOV).
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // Register/register ModRM form ('reg' may also be a group opcode).
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Register + memory operand (base + offset).
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always encodes a 32-bit displacement (patchable offset).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // Register + memory operand (base + index * scale + offset, SIB form).
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !CPU(X86_64)
        // Register + absolute-address memory operand (32-bit targets only).
        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
   1690 
        // Two-byte (0F-escaped) opcode; no operands.
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        // Two-byte opcode, register/register ModRM form.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Two-byte opcode, register + memory operand (base + offset).
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // Two-byte opcode, register + memory operand (SIB form).
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !CPU(X86_64)
        // Two-byte opcode, register + absolute-address operand (32-bit targets only).
        void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
   1734 
   1735 #if CPU(X86_64)
   1736         // Quad-word-sized operands:
   1737         //
   1738         // Used to format 64-bit operantions, planting a REX.w prefix.
   1739         // When planting d64 or f64 instructions, not requiring a REX.w prefix,
   1740         // the normal (non-'64'-postfixed) formatters should be used.
   1741 
        // Emit a one-byte opcode with no operands, prefixed by REX.w to make the
        // operation 64-bit.
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }
   1748 
        // Emit a 64-bit one-byte opcode whose register operand is encoded in the
        // low three bits of the opcode byte itself (e.g. push/pop/mov-imm forms);
        // the high bit of the register goes into REX.b.
        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }
   1755 
        // Emit a 64-bit one-byte opcode with register-to-register operands
        // (reg may also be a group opcode number).
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1763 
        // Emit a 64-bit one-byte opcode with a register operand and a base+offset
        // memory operand; the displacement encoding is chosen by memoryModRM.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }
   1771 
        // As oneByteOp64(opcode, reg, base, offset), but always emits a full 32-bit
        // displacement — used where the offset may be patched later.
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }
   1779 
        // Emit a 64-bit one-byte opcode with a register operand and a scaled
        // index memory operand (base + (index << scale) + offset).
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
   1787 
        // Emit a 64-bit two-byte-opcode (0x0F escape) instruction with
        // register-to-register operands.
        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1796 #endif
   1797 
   1798         // Byte-operands:
   1799         //
   1800         // These methods format byte operations.  Byte operations differ from the normal
   1801         // formatters in the circumstances under which they will decide to emit REX prefixes.
   1802         // These should be used where any register operand signifies a byte register.
   1803         //
        // The distinction is due to the handling of register numbers in the range 4..7 on
   1805         // x86-64.  These register numbers may either represent the second byte of the first
   1806         // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
   1807         //
   1808         // Since ah..bh cannot be used in all permutations of operands (specifically cannot
   1809         // be accessed where a REX prefix is present), these are likely best treated as
   1810         // deprecated.  In order to ensure the correct registers spl..dil are selected a
   1811         // REX prefix will be emitted for any byte register operand in the range 4..15.
   1812         //
        // These formatters may be used in instructions where a mix of operand sizes is present,
        // in which case an unnecessary REX will be emitted, for example:
   1815         //     movzbl %al, %edi
   1816         // In this case a REX will be planted since edi is 7 (and were this a byte operand
   1817         // a REX would be required to specify dil instead of bh).  Unneeded REX prefixes will
   1818         // be silently ignored by the processor.
   1819         //
   1820         // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
   1821         // is provided to check byte register operands.
   1822 
        // Emit a one-byte opcode with a byte-register operand selected by a group
        // opcode. A REX is planted for any byte register >= 4 so spl..dil are
        // selected instead of ah..bh.
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
   1830 
   1831         void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
   1832         {
   1833             m_buffer.ensureSpace(maxInstructionSize);
   1834             emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
   1835             m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
   1836             m_buffer.putByteUnchecked(opcode);
   1837             registerModRM(reg, rm);
   1838         }
   1839 
        // Emit a two-byte-opcode instruction with a byte-register operand selected
        // by a group opcode; REX planted for byte registers >= 4 (see oneByteOp8).
        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
   1848 
   1849         // Immediates:
   1850         //
        // An immediate should be appended where appropriate after an op has been emitted.
   1852         // The writes are unchecked since the opcode formatters above will have ensured space.
   1853 
        // Append an 8-bit immediate. Unchecked: the preceding op formatter
        // already ensured buffer space.
        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }
   1858 
        // Append a 16-bit immediate (unchecked; space was reserved by the op formatter).
        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }
   1863 
        // Append a 32-bit immediate (unchecked; space was reserved by the op formatter).
        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }
   1868 
        // Append a 64-bit immediate (unchecked; space was reserved by the op formatter).
        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }
   1873 
        // Append a 32-bit zero placeholder for a rel32 operand and return a JmpSrc
        // recording the buffer position just past it — the point relative jumps are
        // computed from when the link is later patched.
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }
   1879 
   1880         // Administrative methods:
   1881 
        size_t size() const { return m_buffer.size(); } // Number of bytes emitted so far.
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } // Is the current emit position aligned?
        void* data() const { return m_buffer.data(); } // Raw (non-executable) buffer contents.
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); } // Copy generated code into executable memory.
   1886 
   1887     private:
   1888 
   1889         // Internals; ModRm and REX formatters.
   1890 
        // Register numbers with special meaning in ModRM/SIB encodings (see the
        // memoryModRM helpers below for how each is worked around):
        static const RegisterID noBase = X86Registers::ebp; // base == ebp in mode 0 means "no base, disp32 follows".
        static const RegisterID hasSib = X86Registers::esp; // rm == esp in ModRM means "a SIB byte follows".
        static const RegisterID noIndex = X86Registers::esp; // index == esp in a SIB means "no index".
#if CPU(X86_64)
        static const RegisterID noBase2 = X86Registers::r13; // Shares ebp's low 3 bits, so it gets the same special-casing.
        static const RegisterID hasSib2 = X86Registers::r12; // Shares esp's low 3 bits, so it too triggers SIB encoding.
   1897 
        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            // r8..r15 are only reachable through the REX.r/x/b extension bits.
            return (reg >= X86Registers::r8);
        }
   1903 
   1904         // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            // Byte-register numbers 4..7 mean ah..bh without a REX but spl..dil
            // with one; 8..15 always need REX. So anything >= esp (4) gets a REX.
            return (reg >= X86Registers::esp);
        }
   1909 
   1910         // Format a REX prefix byte.
   1911         inline void emitRex(bool w, int r, int x, int b)
   1912         {
   1913             m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
   1914         }
   1915 
   1916         // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            // Unconditional: 64-bit operations always carry the REX.w bit.
            emitRex(true, r, x, b);
        }
   1921 
   1922         // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
   1923         // regRequiresRex() to check other registers (i.e. address base & index).
   1924         inline void emitRexIf(bool condition, int r, int x, int b)
   1925         {
   1926             if (condition) emitRex(false, r, x, b);
   1927         }
   1928 
   1929         // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
   1930         inline void emitRexIfNeeded(int r, int x, int b)
   1931         {
   1932             emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
   1933         }
   1934 #else
        // No REX prefix bytes on 32-bit x86: these stubs keep the formatters above
        // compiling unchanged while emitting nothing.
        inline bool regRequiresRex(int) { return false; } // No extended registers on 32-bit.
        inline bool byteRegRequiresRex(int) { return false; } // ah..bh are directly encodable.
        inline void emitRexIf(bool, int, int, int) {} // No-op on 32-bit.
        inline void emitRexIfNeeded(int, int, int) {} // No-op on 32-bit.
   1940 #endif
   1941 
        // The 2-bit 'mod' field of a ModRM byte; enumerator order matches the
        // hardware encoding (0..3), relied on by putModRm's 'mode << 6'.
        enum ModRmMode {
            ModRmMemoryNoDisp,  // 0: memory operand, no displacement.
            ModRmMemoryDisp8,   // 1: memory operand + signed 8-bit displacement.
            ModRmMemoryDisp32,  // 2: memory operand + 32-bit displacement.
            ModRmRegister,      // 3: direct register operand.
        };
   1948 
   1949         void putModRm(ModRmMode mode, int reg, RegisterID rm)
   1950         {
   1951             m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
   1952         }
   1953 
   1954         void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
   1955         {
   1956             ASSERT(mode != ModRmRegister);
   1957 
   1958             putModRm(mode, reg, hasSib);
   1959             m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
   1960         }
   1961 
        // Plant a ModRM byte for a direct register-to-register operand (mode 3).
        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }
   1966 
        // Plant the ModRM byte (plus SIB and displacement where needed) for a
        // base + offset memory operand, picking the shortest displacement form
        // (none / disp8 / disp32) that can represent the offset.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    // Offset fits in a signed byte — use the shorter disp8 form.
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
                // A no-displacement encoding with base ebp (or r13) would instead
                // mean "disp32 only", so those bases always get an explicit displacement.
#if CPU(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }
   2000 
        // As memoryModRM(reg, base, offset), but always emits a full 32-bit
        // displacement regardless of the offset's magnitude (useful when the
        // displacement may be repatched later).
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
   2016 
        // Plant ModRM + SIB (+ displacement) for a base + (index << scale) + offset
        // operand. An index is mandatory here: index == esp (noIndex) is not
        // encodable in a SIB, hence the assertion.
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

            // As in the base-only form, a zero offset cannot be used with base
            // ebp (or r13), which would be misread as "disp32 only".
#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }
   2035 
#if !CPU(X86_64)
        // Plant ModRM + absolute 32-bit address for a direct-memory operand
        // (32-bit x86 only; on x86-64 this encoding is RIP-relative instead).
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif
   2044 
   2045         AssemblerBuffer m_buffer;
   2046     } m_formatter;
   2047 };
   2048 
   2049 } // namespace JSC
   2050 
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
   2052 
   2053 #endif // X86Assembler_h
   2054