      1 /*
      2  * Copyright (C) 2008 Apple Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions
      6  * are met:
      7  * 1. Redistributions of source code must retain the above copyright
      8  *    notice, this list of conditions and the following disclaimer.
      9  * 2. Redistributions in binary form must reproduce the above copyright
     10  *    notice, this list of conditions and the following disclaimer in the
     11  *    documentation and/or other materials provided with the distribution.
     12  *
     13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
     14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
     17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
     21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     24  */
     25 
     26 #ifndef X86Assembler_h
     27 #define X86Assembler_h
     28 
     29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
     30 
     31 #include "AssemblerBuffer.h"
     32 #include <stdint.h>
     33 #include <wtf/Assertions.h>
     34 #include <wtf/Vector.h>
     35 
     36 namespace JSC {
     37 
     38 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
     39 
namespace X86Registers {
    // General-purpose registers. Enumerator order is significant: the implicit
    // values (eax = 0 ... edi = 7, then r8 = 8 ... r15 = 15 on x86-64) are the
    // hardware register numbers encoded directly into ModRM/SIB/REX bytes.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers; values 0-7 are the hardware xmm register numbers.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
     74 
     75 class X86Assembler {
     76 public:
     77     typedef X86Registers::RegisterID RegisterID;
     78     typedef X86Registers::XMMRegisterID XMMRegisterID;
     79     typedef XMMRegisterID FPRegisterID;
     80 
    // x86 condition codes, in hardware order: the enumerator value is the low
    // nibble added to the Jcc/SETcc base opcode (see jccRel32()/setccOpcode()).
    typedef enum {
        ConditionO,  // overflow
        ConditionNO, // no overflow
        ConditionB,  // below (unsigned <)
        ConditionAE, // above or equal (unsigned >=)
        ConditionE,  // equal
        ConditionNE, // not equal
        ConditionBE, // below or equal (unsigned <=)
        ConditionA,  // above (unsigned >)
        ConditionS,  // sign set (negative)
        ConditionNS, // sign clear
        ConditionP,  // parity even
        ConditionNP, // parity odd
        ConditionL,  // less (signed <)
        ConditionGE, // greater or equal (signed >=)
        ConditionLE, // less or equal (signed <=)
        ConditionG,  // greater (signed >)

        ConditionC  = ConditionB,  // carry set is the same encoding as below
        ConditionNC = ConditionAE, // carry clear is the same encoding as above-or-equal
    } Condition;
    102 
    103 private:
    // One-byte x86 opcodes, named after the Intel-manual mnemonic and operand
    // encoding: Ev/Gv are dword/qword r/m and register operands, Eb/Gb byte
    // operands, Ib/Iz byte and dword immediates, Ov a direct memory offset.
    // PRE_* entries are instruction prefixes; OP_GROUPn_* opcodes additionally
    // take a GroupOpcodeID opcode extension in the ModRM reg field.
    typedef enum {
        OP_ADD_EvGv                     = 0x01,
        OP_ADD_GvEv                     = 0x03,
        OP_OR_EvGv                      = 0x09,
        OP_OR_GvEv                      = 0x0B,
        OP_2BYTE_ESCAPE                 = 0x0F,
        OP_AND_EvGv                     = 0x21,
        OP_AND_GvEv                     = 0x23,
        OP_SUB_EvGv                     = 0x29,
        OP_SUB_GvEv                     = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
        OP_XOR_EvGv                     = 0x31,
        OP_XOR_GvEv                     = 0x33,
        OP_CMP_EvGv                     = 0x39,
        OP_CMP_GvEv                     = 0x3B,
#if CPU(X86_64)
        PRE_REX                         = 0x40,
#endif
        OP_PUSH_EAX                     = 0x50,
        OP_POP_EAX                      = 0x58,
#if CPU(X86_64)
        OP_MOVSXD_GvEv                  = 0x63,
#endif
        PRE_OPERAND_SIZE                = 0x66,
        PRE_SSE_66                      = 0x66,
        OP_PUSH_Iz                      = 0x68,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EbIb                  = 0x80,
        OP_GROUP1_EvIz                  = 0x81,
        OP_GROUP1_EvIb                  = 0x83,
        OP_TEST_EbGb                    = 0x84,
        OP_TEST_EvGv                    = 0x85,
        OP_XCHG_EvGv                    = 0x87,
        OP_MOV_EvGv                     = 0x89,
        OP_MOV_GvEv                     = 0x8B,
        OP_LEA                          = 0x8D,
        OP_GROUP1A_Ev                   = 0x8F,
        OP_CDQ                          = 0x99,
        OP_MOV_EAXOv                    = 0xA1,
        OP_MOV_OvEAX                    = 0xA3,
        OP_MOV_EAXIv                    = 0xB8,
        OP_GROUP2_EvIb                  = 0xC1,
        OP_RET                          = 0xC3,
        OP_GROUP11_EvIz                 = 0xC7,
        OP_INT3                         = 0xCC,
        OP_GROUP2_Ev1                   = 0xD1,
        OP_GROUP2_EvCL                  = 0xD3,
        OP_CALL_rel32                   = 0xE8,
        OP_JMP_rel32                    = 0xE9,
        PRE_SSE_F2                      = 0xF2,
        OP_HLT                          = 0xF4,
        OP_GROUP3_EbIb                  = 0xF6,
        OP_GROUP3_Ev                    = 0xF7,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev                    = 0xFF,
    } OneByteOpcodeID;
    160 
    // Second byte of two-byte (0x0F-escaped) opcodes, kept in ascending opcode
    // order like the one-byte table above. (The original listed 0x51 and 0x57
    // after the 0x5x arithmetic entries, breaking that convention.)
    // Suffixes follow Intel-manual operand notation; OP_SETCC keeps its
    // historical name since setccOpcode() references it.
    typedef enum {
        OP2_MOVSD_VsdWsd    = 0x10,
        OP2_MOVSD_WsdVsd    = 0x11,
        OP2_CVTSI2SD_VsdEd  = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd  = 0x2E,
        OP2_SQRTSD_VsdWsd   = 0x51,
        OP2_XORPD_VpdWpd    = 0x57,
        OP2_ADDSD_VsdWsd    = 0x58,
        OP2_MULSD_VsdWsd    = 0x59,
        OP2_SUBSD_VsdWsd    = 0x5C,
        OP2_DIVSD_VsdWsd    = 0x5E,
        OP2_MOVD_VdEd       = 0x6E,
        OP2_MOVD_EdVd       = 0x7E,
        OP2_JCC_rel32       = 0x80, // base opcode; jccRel32() adds the condition code
        OP_SETCC            = 0x90, // base opcode; setccOpcode() adds the condition code
        OP2_IMUL_GvEv       = 0xAF,
        OP2_MOVZX_GvEb      = 0xB6,
        OP2_MOVZX_GvEw      = 0xB7,
        OP2_PEXTRW_GdUdIb   = 0xC5,
    } TwoByteOpcodeID;
    182 
    183     TwoByteOpcodeID jccRel32(Condition cond)
    184     {
    185         return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
    186     }
    187 
    188     TwoByteOpcodeID setccOpcode(Condition cond)
    189     {
    190         return (TwoByteOpcodeID)(OP_SETCC + cond);
    191     }
    192 
    // Opcode extensions ("/digit" in the Intel manual): these values go in the
    // ModRM reg field to select the operation within an OP_GROUPn opcode.
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR  = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SHR = 5,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT  = 2,
        GROUP3_OP_NEG  = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN  = 4,
        GROUP5_OP_PUSH  = 6,

        GROUP11_MOV = 0,
    } GroupOpcodeID;
    219 
    220     class X86InstructionFormatter;
    221 public:
    222 
    // Identifies an emitted jump/call whose target may not yet be known.
    // m_offset is an offset into the code buffer, or -1 when unset; only the
    // assembler and its formatter may construct a set JmpSrc.
    class JmpSrc {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpSrc()
            : m_offset(-1)
        {
        }

        // True once this source has been bound to a buffer offset.
        bool isSet() const { return (m_offset != -1); }

    private:
        JmpSrc(int offset)
            : m_offset(offset)
        {
        }

        int m_offset;
    };
    242 
    // Identifies a position in the code buffer that jumps may target.
    // The buffer offset is packed into a 31-bit field alongside a 'used' flag,
    // so the pair fits in a single word.
    class JmpDst {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpDst()
            : m_offset(-1)
            , m_used(false)
        {
        }

        bool isUsed() const { return m_used; }
        // True once this destination has been bound to a buffer offset.
        bool isSet() const { return (m_offset != -1); }
        void used() { m_used = true; }
    private:
        JmpDst(int offset)
            : m_offset(offset)
            , m_used(false)
        {
            // Verify the offset survived truncation into the 31-bit field.
            ASSERT(m_offset == offset);
        }

        int m_offset : 31; // offset into the code buffer; -1 when unset
        bool m_used : 1;   // set via used() when something references this label
    };
    267 
    // The assembler starts empty; m_formatter owns the underlying code buffer.
    X86Assembler()
    {
    }

    // Number of bytes of machine code emitted so far.
    size_t size() const { return m_formatter.size(); }
    273 
    // Stack operations:

    // push reg
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    // pop reg
    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    // push imm (always emits the 32-bit immediate form)
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    // push [base + offset]
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    // pop into [base + offset]
    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
    301 
    // Arithmetic operations:
    //
    // Naming convention: <op><size>_<operands>; size 'l' = 32-bit, 'q' = 64-bit;
    // operand letters are r (register), m (memory at base+offset), i (immediate).
    // Immediate forms emit the shorter sign-extended 8-bit encoding when the
    // value fits in a signed byte, otherwise the full 32-bit immediate.

#if !CPU(X86_64)
    // Add-with-carry of an immediate into memory at an absolute address (x86-32 only).
    void adcl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    // 32-bit add: dst += src.
    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    // 32-bit add from memory: dst += [base + offset].
    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    // 32-bit add to memory: [base + offset] += src.
    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    // 32-bit add of an immediate: dst += imm.
    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    // 32-bit add of an immediate to memory: [base + offset] += imm.
    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit add: dst += src.
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    // 64-bit add of an immediate (sign-extended to 64 bits): dst += imm.
    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    // 64-bit add of an immediate to memory: [base + offset] += imm.
    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit add of an immediate to memory at an absolute address (x86-32 only).
    void addl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    393 
    // 32-bit bitwise and: dst &= src.
    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    // 32-bit and from memory: dst &= [base + offset].
    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    // 32-bit and to memory: [base + offset] &= src.
    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    // 32-bit and of an immediate: dst &= imm.
    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    // 32-bit and of an immediate to memory: [base + offset] &= imm.
    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit bitwise and: dst &= src.
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    // 64-bit and of an immediate (sign-extended to 64 bits): dst &= imm.
    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit and of an immediate to memory at an absolute address (x86-32 only).
    void andl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    459 
    // 32-bit two's-complement negate: dst = -dst.
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    // 32-bit negate in memory: [base + offset] = -[base + offset].
    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    // 32-bit bitwise complement: dst = ~dst.
    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    // 32-bit complement in memory: [base + offset] = ~[base + offset].
    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
    479 
    // 32-bit bitwise or: dst |= src.
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    // 32-bit or from memory: dst |= [base + offset].
    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    // 32-bit or to memory: [base + offset] |= src.
    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    // 32-bit or of an immediate: dst |= imm.
    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    // 32-bit or of an immediate to memory: [base + offset] |= imm.
    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit bitwise or: dst |= src.
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    // 64-bit or of an immediate (sign-extended to 64 bits): dst |= imm.
    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit or of an immediate to memory at an absolute address (x86-32 only).
    void orl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    545 
    // 32-bit subtract: dst -= src.
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    // 32-bit subtract from memory: dst -= [base + offset].
    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    // 32-bit subtract to memory: [base + offset] -= src.
    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    // 32-bit subtract of an immediate: dst -= imm.
    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    // 32-bit subtract of an immediate to memory: [base + offset] -= imm.
    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit subtract: dst -= src.
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    // 64-bit subtract of an immediate (sign-extended to 64 bits): dst -= imm.
    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit subtract of an immediate to memory at an absolute address (x86-32 only).
    void subl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    611 
    // 32-bit bitwise xor: dst ^= src.
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    // 32-bit xor from memory: dst ^= [base + offset].
    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    // 32-bit xor to memory: [base + offset] ^= src.
    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    // 32-bit xor of an immediate to memory: [base + offset] ^= imm.
    void xorl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // 32-bit xor of an immediate: dst ^= imm.
    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit bitwise xor: dst ^= src.
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    // 64-bit xor of an immediate (sign-extended to 64 bits): dst ^= imm.
    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
    666 
    // 32-bit arithmetic right shift by an immediate: dst >>= imm (sign-filling).
    // Uses the shorter shift-by-1 opcode (D1 /7) when imm == 1.
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    // 32-bit arithmetic right shift by %cl.
    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    // 32-bit logical right shift by an immediate: dst >>>= imm (zero-filling).
    void shrl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
            m_formatter.immediate8(imm);
        }
    }

    // 32-bit logical right shift by %cl.
    void shrl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
    }

    // 32-bit left shift by an immediate: dst <<= imm.
    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    // 32-bit left shift by %cl.
    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if CPU(X86_64)
    // 64-bit arithmetic right shift by %cl.
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    // 64-bit arithmetic right shift by an immediate.
    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
    728 
    // 32-bit signed multiply: dst *= src.
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    // 32-bit signed multiply from memory: dst *= [base + offset].
    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    // Three-operand signed multiply: dst = src * value (always a 32-bit immediate).
    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    // Signed 32-bit divide of edx:eax by dst; per the x86 IDIV instruction,
    // the quotient lands in eax and the remainder in edx.
    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
    749 
    // Comparisons:
    //
    // All compares only set the flags register; combine with a Jcc/SETcc
    // using the Condition enum above.

    // 32-bit compare: flags from dst - src.
    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    // Compare register against memory: flags from [base + offset] - src.
    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    // Compare memory against register: flags from src - [base + offset].
    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    // 32-bit compare with an immediate: flags from dst - imm.
    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    // Like cmpl_ir but always emits the 32-bit immediate form, producing a
    // fixed-length instruction (NOTE(review): presumably so the immediate can
    // be repatched in place — confirm with callers).
    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    // 32-bit compare of memory with an immediate: flags from [base + offset] - imm.
    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // 8-bit compare of memory with an immediate.
    void cmpb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate8(imm);
    }

    // 8-bit compare against [base + index*scale + offset].
    void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    // 32-bit compare against [base + index*scale + offset].
    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    // Like cmpl_im but always the 32-bit immediate form (fixed-length encoding;
    // see cmpl_ir_force32).
    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }
    823 
    824 #if CPU(X86_64)
    825     void cmpq_rr(RegisterID src, RegisterID dst)
    826     {
    827         m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    828     }
    829 
    830     void cmpq_rm(RegisterID src, int offset, RegisterID base)
    831     {
    832         m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    833     }
    834 
    835     void cmpq_mr(int offset, RegisterID base, RegisterID src)
    836     {
    837         m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    838     }
    839 
    840     void cmpq_ir(int imm, RegisterID dst)
    841     {
    842         if (CAN_SIGN_EXTEND_8_32(imm)) {
    843             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
    844             m_formatter.immediate8(imm);
    845         } else {
    846             m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
    847             m_formatter.immediate32(imm);
    848         }
    849     }
    850 
    851     void cmpq_im(int imm, int offset, RegisterID base)
    852     {
    853         if (CAN_SIGN_EXTEND_8_32(imm)) {
    854             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
    855             m_formatter.immediate8(imm);
    856         } else {
    857             m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
    858             m_formatter.immediate32(imm);
    859         }
    860     }
    861 
    // cmp qword ptr [base+index*scale+offset], imm — SIB-addressed variant.
    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
    872 #else
    // cmp dword ptr [addr], r32 — absolute-address form (32-bit builds only).
    void cmpl_rm(RegisterID reg, const void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }
    877 
    // cmp dword ptr [addr], imm — absolute address, imm8 vs imm32 by value.
    void cmpl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
    888 #endif
    889 
    // cmp word ptr [base+index*scale+offset], r16 — 66h prefix selects 16-bit operands.
    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }
    895 
    // cmp word ptr [base+index*scale+offset], imm. With the 66h prefix the
    // EvIz form takes a 16-bit immediate, hence immediate16 on the long path.
    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
    908 
    // test r32, r32 — sets flags from the AND of the operands, no result stored.
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }
    913 
    // test r32, imm32.
    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }
    919 
    // test dword ptr [base+offset], imm32.
    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }
    925 
    // test r8, r8 (TEST EbGb).
    // NOTE(review): this goes through the plain oneByteOp emitter, not an
    // 8-bit-register-aware one; for registers esp..edi the ModRM would select
    // the legacy ah..bh byte registers — confirm callers only pass eax..ebx.
    void testb_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EbGb, src, dst);
    }
    930 
    // test byte ptr [base+offset], imm8.
    void testb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate8(imm);
    }
    936 
    // test byte ptr [base+index*scale+offset], imm8.
    void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }
    942 
    // test dword ptr [base+index*scale+offset], imm32.
    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    948 
    949 #if CPU(X86_64)
    // test r64, r64.
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }
    954 
    // test r64, imm32 — the immediate is sign-extended to 64 bits by the CPU.
    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }
    960 
    // test qword ptr [base+offset], imm32 (sign-extended).
    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }
    966 
    // test qword ptr [base+index*scale+offset], imm32 (sign-extended).
    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    972 #endif
    973 
    // test r16, r16 — 66h prefix selects 16-bit operand size.
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }
    979 
    // test r8, imm8 — uses the 8-bit-aware emitter so the right byte register
    // is selected on x86-64 (REX planted where needed).
    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }
    985 
    // setcc r8 — materialize an arbitrary condition flag as 0/1 in a byte register.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }
    990 
    // sete r8 — dst = 1 if ZF set, else 0.
    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }
    995 
    // setz is the same instruction as sete; provided for mnemonic convenience.
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }
   1000 
    // setne r8 — dst = 1 if ZF clear, else 0.
    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }
   1005 
    // setnz is the same instruction as setne; provided for mnemonic convenience.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
   1010 
   1011     // Various move ops:
   1012 
    // cdq — sign-extend eax into edx:eax (used before idiv).
    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }
   1017 
    // xchg r32, r32.
    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }
   1022 
   1023 #if CPU(X86_64)
    // xchg r64, r64.
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
   1028 #endif
   1029 
    // mov r32, r32.
    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }
   1034 
    // mov dword ptr [base+offset], r32.
    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }
   1039 
    // mov dword ptr [base+offset], r32 — forces a 32-bit displacement so the
    // offset slot has a fixed, patchable width.
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }
   1044 
    // mov dword ptr [base+index*scale+offset], r32.
    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }
   1049 
    // mov eax, [addr] — the dedicated moffs form; the absolute address is
    // pointer-sized (8 bytes on x86-64, 4 on x86).
    void movl_mEAX(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
   1059 
    // mov r32, dword ptr [base+offset].
    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }
   1064 
    // mov r32, dword ptr [base+offset] — forced 32-bit displacement (patchable).
    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }
   1069 
    // mov r32, dword ptr [base+index*scale+offset].
    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }
   1074 
    // mov r32, imm32 — B8+r form: the register is folded into the opcode byte.
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }
   1080 
    // mov dword ptr [base+offset], imm32.
    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }
   1086 
    // mov [addr], eax — moffs store form; absolute address is pointer-sized.
    void movl_EAXm(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
   1096 
   1097 #if CPU(X86_64)
    // mov r64, r64.
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }
   1102 
    // mov qword ptr [base+offset], r64.
    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }
   1107 
    // mov qword ptr [base+offset], r64 — forced 32-bit displacement (patchable).
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }
   1112 
    // mov qword ptr [base+index*scale+offset], r64.
    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }
   1117 
    // mov rax, [addr] — 64-bit moffs load with an 8-byte absolute address.
    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }
   1123 
    // mov [addr], rax — 64-bit moffs store with an 8-byte absolute address.
    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }
   1129 
    // mov r64, qword ptr [base+offset].
    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }
   1134 
    // mov r64, qword ptr [base+offset] — forced 32-bit displacement (patchable).
    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }
   1139 
    // mov r64, qword ptr [base+index*scale+offset].
    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }
   1144 
    // mov qword ptr [base+offset], imm32 — the immediate is sign-extended to 64 bits.
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }
   1150 
    // mov r64, imm64 ("movabs") — full 8-byte immediate, REX.W B8+r form.
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }
   1156 
    // movsxd r64, r32 — sign-extend a 32-bit register into a 64-bit one.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
   1161 
   1162 
   1163 #else
   1164     void movl_rm(RegisterID src, const void* addr)
   1165     {
   1166         if (src == X86Registers::eax)
   1167             movl_EAXm(addr);
   1168         else
   1169             m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
   1170     }
   1171 
   1172     void movl_mr(const void* addr, RegisterID dst)
   1173     {
   1174         if (dst == X86Registers::eax)
   1175             movl_mEAX(addr);
   1176         else
   1177             m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
   1178     }
   1179 
    // mov dword ptr [addr], imm32 — absolute-address form (32-bit builds only).
    void movl_i32m(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
   1185 #endif
   1186 
    // movzx r32, word ptr [base+offset] — zero-extend a 16-bit load.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }
   1191 
    // movzx r32, word ptr [base+index*scale+offset].
    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }
   1196 
    // movzx r32, r8 — zero-extend a byte register.
    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX).  Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }
   1204 
    // lea r32, [base+offset] — address arithmetic without touching flags.
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
   1209 #if CPU(X86_64)
    // lea r64, [base+offset].
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
   1214 #endif
   1215 
   1216     // Flow control:
   1217 
    // call rel32 — returns a JmpSrc locating the patchable 32-bit offset.
    JmpSrc call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }
   1223 
    // call r — indirect call through a register; the JmpSrc marks the
    // instruction end (the return address site), not a patchable field.
    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }
   1229 
    // call [base+offset] — indirect call through memory.
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }
   1234 
    // jmp rel32 — returns a JmpSrc locating the patchable 32-bit offset.
    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }
   1240 
    // Return a JmpSrc so we have a label to the jump, so that we can use it
    // to make a tail-recursive call on x86-64.  The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    // jmp r — indirect jump through a register; the JmpSrc is only a label to
    // the end of the instruction (it cannot be linked like a rel32 jump).
    JmpSrc jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return JmpSrc(m_formatter.size());
    }
   1249 
    // jmp [base+offset] — indirect jump through memory.
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
   1254 
    // jne rel32 — returns a JmpSrc for linking/patching the 32-bit offset.
    JmpSrc jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }
   1260 
    // jnz is the same instruction as jne; provided for mnemonic convenience.
    JmpSrc jnz()
    {
        return jne();
    }
   1265 
    // je rel32.
    JmpSrc je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }
   1271 
    // jz is the same instruction as je; provided for mnemonic convenience.
    JmpSrc jz()
    {
        return je();
    }
   1276 
    // jl rel32 (signed less-than).
    JmpSrc jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }
   1282 
    // jb rel32 (unsigned below).
    JmpSrc jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }
   1288 
    // jle rel32 (signed less-or-equal).
    JmpSrc jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }
   1294 
    // jbe rel32 (unsigned below-or-equal).
    JmpSrc jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }
   1300 
    // jge rel32 (signed greater-or-equal).
    JmpSrc jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }
   1306 
    // jg rel32 (signed greater-than).
    JmpSrc jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }
   1312 
    // ja rel32 (unsigned above).
    JmpSrc ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }
   1318 
    // jae rel32 (unsigned above-or-equal).
    JmpSrc jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }
   1324 
    // jo rel32 (overflow set).
    JmpSrc jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }
   1330 
    // jp rel32 (parity set — used for unordered floating-point compares).
    JmpSrc jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }
   1336 
    // js rel32 (sign flag set).
    JmpSrc js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }
   1342 
    // jcc rel32 for an arbitrary condition code.
    JmpSrc jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
   1348 
   1349     // SSE operations:
   1350 
    // addsd xmm, xmm — scalar double add (F2 prefix).
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1356 
    // addsd xmm, qword ptr [base+offset].
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1362 
    // cvtsi2sd xmm, r32 — convert a signed integer register to a double.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }
   1368 
    // cvtsi2sd xmm, dword ptr [base+offset].
    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }
   1374 
   1375 #if !CPU(X86_64)
    // cvtsi2sd xmm, dword ptr [addr] — absolute address (32-bit builds only).
    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
   1381 #endif
   1382 
    // cvttsd2si r32, xmm — truncating double-to-integer conversion.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }
   1388 
    // movd r32, xmm — move the low 32 bits of an XMM register to a GPR.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }
   1394 
   1395 #if CPU(X86_64)
    // movq r64, xmm — REX.W form of movd: full 64-bit XMM-to-GPR move.
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }
   1401 
    // movq xmm, r64 — full 64-bit GPR-to-XMM move.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
   1407 #endif
   1408 
    // movsd xmm, xmm — copy a scalar double between XMM registers.
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1414 
    // movsd qword ptr [base+offset], xmm — store a scalar double.
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }
   1420 
    // movsd xmm, qword ptr [base+offset] — load a scalar double.
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1426 
   1427 #if !CPU(X86_64)
    // movsd xmm, qword ptr [addr] — absolute address (32-bit builds only).
    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
   1433 #endif
   1434 
    // mulsd xmm, xmm — scalar double multiply.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1440 
    // mulsd xmm, qword ptr [base+offset].
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1446 
    // pextrw r32, xmm, imm8 — extract 16-bit word `whichWord` from an XMM register.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }
   1453 
    // subsd xmm, xmm — scalar double subtract (dst -= src).
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1459 
    // subsd xmm, qword ptr [base+offset].
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1465 
    // ucomisd xmm, xmm — unordered scalar double compare, sets EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1471 
    // ucomisd xmm, qword ptr [base+offset].
    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1477 
    // divsd xmm, xmm — scalar double divide (dst /= src).
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1483 
    // divsd xmm, qword ptr [base+offset].
    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }
   1489 
    // xorpd xmm, xmm — packed-double XOR (commonly used to negate/zero doubles).
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }
   1495 
    // sqrtsd xmm, xmm — scalar double square root.
    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
   1501 
   1502     // Misc instructions:
   1503 
    // int3 — software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }
   1508 
    // ret — near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }
   1513 
    // Emit the branch-not-taken hint prefix (2Eh) ahead of the next branch.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
   1518 
   1519     // Assembler admin methods:
   1520 
    // Capture the current buffer offset as a jump destination label.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }
   1525 
    // Derive a destination label from an existing jump source, optionally displaced.
    static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
    {
        return JmpDst(jump.m_offset + offset);
    }
   1530 
    // Pad with hlt bytes until the buffer offset is a multiple of `alignment`,
    // then return a label at the aligned position.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
   1538 
   1539     // Linking & patching:
   1540     //
   1541     // 'link' and 'patch' methods are for use on unprotected code - such as the code
   1542     // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
   1546 
    // Resolve an in-buffer rel32 jump: write the displacement from `from` to `to`.
    // The assert checks the offset slot is still zero, i.e. not already linked.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(from.m_offset != -1);
        ASSERT(to.m_offset != -1);

        char* code = reinterpret_cast<char*>(m_formatter.data());
        ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
        setRel32(code + from.m_offset, code + to.m_offset);
    }
   1556 
    // Link a rel32 jump within already-copied (but still writable) code to `to`.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }
   1563 
    // Link a rel32 call within already-copied (but still writable) code to `to`.
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }
   1570 
    // Patch a pointer-sized immediate ending at `where` within writable code.
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        ASSERT(where.m_offset != -1);

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }
   1577 
    // Re-point a previously linked rel32 jump at a new target.
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }
   1582 
    // Re-point a previously linked rel32 call at a new target.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }
   1587 
    // Overwrite the 32-bit immediate ending at `where`.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }
   1592 
    // Overwrite the pointer-sized immediate ending at `where`.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }
   1597 
    // Buffer offset of the instruction following a call (its return address site).
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }
   1603 
    // Absolute address of a jump source once code has been copied to `code`.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }
   1610 
    // Absolute address of a label once code has been copied to `code`.
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }
   1617 
    // Byte distance from label `src` to label `dst` (positive if dst is later).
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
   1622 
    // Byte distance from a label to a jump source.
    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }
   1627 
    // Byte distance from a jump source to a label.
    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
   1632 
    // Copy the assembled buffer into executable memory from `allocator`;
    // returns the copy (asserted non-null — allocation failure is fatal here).
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
   1639 
    // Discard everything emitted after `rewindTo`, truncating the buffer.
    void rewindToLabel(JmpDst rewindTo) { m_formatter.rewindToLabel(rewindTo); }
   1641 
   1642 #ifndef NDEBUG
    // Current buffer offset, exposed for debug builds only.
    unsigned debugOffset() { return m_formatter.debugOffset(); }
   1644 #endif
   1645 
   1646 private:
   1647 
   1648     static void setPointer(void* where, void* value)
   1649     {
   1650         reinterpret_cast<void**>(where)[-1] = value;
   1651     }
   1652 
   1653     static void setInt32(void* where, int32_t value)
   1654     {
   1655         reinterpret_cast<int32_t*>(where)[-1] = value;
   1656     }
   1657 
    // Write the rel32 displacement (to - from) into the slot ending at `from`.
    // `from` is the address of the instruction end, which is what x86 relative
    // offsets are computed against; asserts the delta fits in 32 bits.
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
   1665 
   1666     class X86InstructionFormatter {
   1667 
   1668         static const int maxInstructionSize = 16;
   1669 
   1670     public:
   1671 
        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.
   1675 
        // Emit a single legacy prefix byte (66h, F2h, 2Eh, ...) before an instruction.
        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
   1680 
   1681         // Word-sized operands / no operand instruction formatters.
   1682         //
   1683         // In addition to the opcode, the following operand permutations are supported:
   1684         //   * None - instruction takes no operands.
   1685         //   * One register - the low three bits of the RegisterID are added into the opcode.
   1686         //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
   1687         //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
   1688         //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
   1689         //
   1690         // For 32-bit x86 targets, the address operand may also be provided as a void*.
   1691         // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
   1692         //
   1693         // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
   1694 
        // Opcode only, no operands.
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }
   1700 
        // Register-in-opcode form: the register's low three bits are added to
        // the opcode byte (e.g. B8+r); REX.B is planted for r8-r15.
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }
   1707 
        // Register-register ModRM form (`reg` may also be a group opcode).
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1715 
        // Register + [base+offset] memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }
   1723 
        // Same as above but always encodes a 32-bit displacement, so the
        // offset field has a fixed width for later patching.
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }
   1731 
        // Register + [base+index*scale+offset] SIB-addressed memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
   1739 
   1740 #if !CPU(X86_64)
        // Register + absolute-address memory operand (32-bit builds only, so no REX).
        void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
   1747 #endif
   1748 
        // Emit a two-byte opcode (0x0F escape byte followed by the opcode),
        // with no operands.
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }
   1755 
        // Emit a two-byte opcode with a register-direct ModRM byte.
        // Note: any REX prefix must precede the 0x0F escape byte.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1764 
        // Emit a two-byte opcode with a base-register + offset memory operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }
   1773 
        // Emit a two-byte opcode with a base + index*scale + offset memory
        // operand (ModRM + SIB byte, plus any displacement).
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
   1782 
   1783 #if !CPU(X86_64)
        // Emit a two-byte opcode with an absolute-address memory operand.
        // 32-bit x86 only, so no REX prefix is ever needed here.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
   1791 #endif
   1792 
   1793 #if CPU(X86_64)
   1794         // Quad-word-sized operands:
   1795         //
        // Used to format 64-bit operations, planting a REX.w prefix.
   1797         // When planting d64 or f64 instructions, not requiring a REX.w prefix,
   1798         // the normal (non-'64'-postfixed) formatters should be used.
   1799 
        // Emit a one-byte opcode with no operands, preceded by a REX.W prefix
        // selecting 64-bit operand size.
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }
   1806 
        // 64-bit form of oneByteOp(opcode, reg): register encoded in the low
        // three bits of the opcode byte, REX.W (and REX.B if needed) planted.
        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }
   1813 
        // 64-bit form of oneByteOp(opcode, reg, rm): register-direct ModRM,
        // REX.W always planted.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1821 
        // 64-bit form of oneByteOp(opcode, reg, base, offset).
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }
   1829 
        // 64-bit form of oneByteOp_disp32: always emits a 32-bit displacement,
        // even when the offset would fit in 8 bits.
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }
   1837 
        // 64-bit form of oneByteOp with a base + index*scale + offset operand.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
   1845 
        // 64-bit form of twoByteOp(opcode, reg, rm). The REX.W prefix is
        // planted before the 0x0F escape byte.
        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1854 #endif
   1855 
   1856         // Byte-operands:
   1857         //
   1858         // These methods format byte operations.  Byte operations differ from the normal
   1859         // formatters in the circumstances under which they will decide to emit REX prefixes.
   1860         // These should be used where any register operand signifies a byte register.
   1861         //
        // The distinction is due to the handling of register numbers in the range 4..7 on
   1863         // x86-64.  These register numbers may either represent the second byte of the first
   1864         // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
   1865         //
   1866         // Since ah..bh cannot be used in all permutations of operands (specifically cannot
   1867         // be accessed where a REX prefix is present), these are likely best treated as
   1868         // deprecated.  In order to ensure the correct registers spl..dil are selected a
   1869         // REX prefix will be emitted for any byte register operand in the range 4..15.
   1870         //
        // These formatters may be used in instructions with a mix of operand sizes, in which
        // case an unnecessary REX will be emitted, for example:
   1873         //     movzbl %al, %edi
   1874         // In this case a REX will be planted since edi is 7 (and were this a byte operand
   1875         // a REX would be required to specify dil instead of bh).  Unneeded REX prefixes will
   1876         // be silently ignored by the processor.
   1877         //
   1878         // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
   1879         // is provided to check byte register operands.
   1880 
        // Emit a one-byte opcode with a byte-register operand and a group
        // opcode in the ModRM reg field; a REX prefix is planted whenever the
        // byte register requires one (see comment block above).
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
   1888 
        // Emit a two-byte opcode where either register may be a byte register;
        // a REX prefix is planted if either operand requires one.
        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
   1897 
        // Emit a two-byte opcode with a byte-register operand and a group
        // opcode in the ModRM reg field.
        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
   1906 
   1907         // Immediates:
   1908         //
        // An immediate should be appended where appropriate after an op has been emitted.
   1910         // The writes are unchecked since the opcode formatters above will have ensured space.
   1911 
        // Append an 8-bit immediate (unchecked; the op formatter reserved space).
        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }
   1916 
        // Append a 16-bit immediate (unchecked; the op formatter reserved space).
        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }
   1921 
        // Append a 32-bit immediate (unchecked; the op formatter reserved space).
        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }
   1926 
        // Append a 64-bit immediate (unchecked; the op formatter reserved space).
        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }
   1931 
        // Append a 32-bit zero placeholder for a relative jump/call target and
        // return a JmpSrc recording the buffer offset just past it, so the
        // real offset can be patched in when the destination is linked.
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }
   1937 
   1938         // Administrative methods:
   1939 
        // Number of bytes emitted so far.
        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        // Copy the generated code into executable memory obtained from 'allocator'.
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }

        // Discard everything emitted after the given label.
        void rewindToLabel(JmpDst rewindTo) { m_buffer.rewindToOffset(rewindTo.m_offset); }

#ifndef NDEBUG
        unsigned debugOffset() { return m_buffer.debugOffset(); }
#endif
   1950 
   1951     private:
   1952 
   1953         // Internals; ModRm and REX formatters.
   1954 
        // Register numbers with special meaning in ModRM/SIB encoding:
        // a ModRM base of ebp (with mod=00) means "no base + disp32", a base
        // of esp means "SIB byte follows", and esp in the SIB index field
        // means "no index".
        static const RegisterID noBase = X86Registers::ebp;
        static const RegisterID hasSib = X86Registers::esp;
        static const RegisterID noIndex = X86Registers::esp;
#if CPU(X86_64)
        // r13 and r12 share the low three encoding bits with ebp and esp, so
        // they receive the same special-case treatment.
        static const RegisterID noBase2 = X86Registers::r13;
        static const RegisterID hasSib2 = X86Registers::r12;
   1961 
        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86Registers::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86Registers::esp);
        }
   1973 
   1974         // Format a REX prefix byte.
   1975         inline void emitRex(bool w, int r, int x, int b)
   1976         {
   1977             m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
   1978         }
   1979 
        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        // The r/x/b arguments extend the reg, index and rm/base fields as usual.
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }
   1985 
   1986         // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
   1987         // regRequiresRex() to check other registers (i.e. address base & index).
   1988         inline void emitRexIf(bool condition, int r, int x, int b)
   1989         {
   1990             if (condition) emitRex(false, r, x, b);
   1991         }
   1992 
   1993         // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
   1994         inline void emitRexIfNeeded(int r, int x, int b)
   1995         {
   1996             emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
   1997         }
   1998 #else
        // No REX prefix bytes on 32-bit x86; these helpers compile to nothing.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
   2004 #endif
   2005 
        // Addressing modes encoded in the top two (mod) bits of the ModRM byte.
        enum ModRmMode {
            ModRmMemoryNoDisp,   // mod=00: memory operand, no displacement
            ModRmMemoryDisp8,    // mod=01: memory operand + 8-bit displacement
            ModRmMemoryDisp32,   // mod=10: memory operand + 32-bit displacement
            ModRmRegister,       // mod=11: register-direct operand
        };
   2012 
   2013         void putModRm(ModRmMode mode, int reg, RegisterID rm)
   2014         {
   2015             m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
   2016         }
   2017 
   2018         void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
   2019         {
   2020             ASSERT(mode != ModRmRegister);
   2021 
   2022             putModRm(mode, reg, hasSib);
   2023             m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
   2024         }
   2025 
        // Emit a register-direct (mod=11) ModRM byte.
        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }
   2030 
        // Emit the ModRM byte (plus SIB byte and displacement, as needed) for
        // a base-register + offset memory operand, selecting the shortest
        // displacement encoding: none, 8-bit sign-extended, or 32-bit.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
                // A zero offset can use the no-displacement form, except when
                // the base is ebp (or r13), whose mod=00 encoding means
                // "no base + disp32" instead.
#if CPU(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }
   2064 
        // As memoryModRM(reg, base, offset), but always emits a 32-bit
        // displacement, regardless of whether the offset would fit in 8 bits.
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
   2080 
        // Emit ModRM + SIB (+ displacement) for a base + index*scale + offset
        // memory operand. 'index' must be a real register: esp's encoding in
        // the SIB index field means "no index".
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

            // A zero offset can use the no-displacement form, except when the
            // base is ebp (or r13), which requires an explicit displacement.
#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }
   2099 
   2100 #if !CPU(X86_64)
        // Emit a ModRM byte plus 32-bit absolute address (32-bit x86 only,
        // hence the pointer-to-int32 cast is safe here).
        void memoryModRM(int reg, const void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
   2107 #endif
   2108 
   2109         AssemblerBuffer m_buffer;
   2110     } m_formatter;
   2111 };
   2112 
   2113 } // namespace JSC
   2114 
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
   2116 
   2117 #endif // X86Assembler_h
   2118