//===-- X86InstrExtension.td - Sign and Zero Extensions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the sign and zero extension operations.
//
//===----------------------------------------------------------------------===//

// Accumulator sign-extension instructions. These take no explicit operands;
// they read and write only the implicit registers listed in Uses/Defs, so
// they are modeled with empty patterns and marked free of side effects.
let neverHasSideEffects = 1 in {
let Defs = [AX], Uses = [AL] in
def CBW : I<0x98, RawFrm, (outs), (ins),
            "{cbtw|cbw}", []>, OpSize;   // AX = signext(AL)
let Defs = [EAX], Uses = [AX] in
def CWDE : I<0x98, RawFrm, (outs), (ins),
            "{cwtl|cwde}", []>;          // EAX = signext(AX)

let Defs = [AX,DX], Uses = [AX] in
def CWD : I<0x99, RawFrm, (outs), (ins),
           "{cwtd|cwd}", []>, OpSize;    // DX:AX = signext(AX)
let Defs = [EAX,EDX], Uses = [EAX] in
def CDQ : I<0x99, RawFrm, (outs), (ins),
           "{cltd|cdq}", []>;            // EDX:EAX = signext(EAX)

// 64-bit widening forms (RI => REX.W-prefixed, x86-64 only).
let Defs = [RAX], Uses = [EAX] in
def CDQE : RI<0x98, RawFrm, (outs), (ins),
              "{cltq|cdqe}", []>;        // RAX = signext(EAX)

let Defs = [RAX,RDX], Uses = [RAX] in
def CQO  : RI<0x99, RawFrm, (outs), (ins),
              "{cqto|cqo}", []>;         // RDX:RAX = signext(RAX)
}

// Sign/Zero extenders
// The 16-bit-destination forms carry empty selection patterns; only the
// 32-bit-destination forms below have sext/zext and extload patterns
// attached for instruction selection.
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
                   "movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_R8>,
                   TB, OpSize;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                   "movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_M8>,
                   TB, OpSize;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src),
                   "movs{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
                   "movs{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sextloadi32i8 addr:$src))], IIC_MOVSX>, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
                   "movs{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sext GR16:$src))], IIC_MOVSX>, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
                   "movs{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sextloadi32i16 addr:$src))], IIC_MOVSX>,
                   TB;

def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
                   "movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_R8>,
                   TB, OpSize;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                   "movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_M8>,
                   TB, OpSize;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                   "movz{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
                   "movz{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zextloadi32i8 addr:$src))], IIC_MOVZX>, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
                   "movz{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zext GR16:$src))], IIC_MOVZX>, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
                   "movz{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zextloadi32i16 addr:$src))], IIC_MOVZX>,
                   TB;

// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
// instead of GR32. This allows them to operate on h registers on x86-64.
// h-register-safe zero-extend forms. No selection patterns: these are
// substituted for MOVZX32rr8/rm8 where a REX prefix must be avoided.
def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
                         (outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
                         "movz{bl|x}\t{$src, $dst|$dst, $src}",
                         [], IIC_MOVZX>, TB;
// Pattern is empty, so the load must be declared explicitly via mayLoad.
let mayLoad = 1 in
def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
                         (outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
                         "movz{bl|x}\t{$src, $dst|$dst, $src}",
                         [], IIC_MOVZX>, TB;

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))], IIC_MOVSX>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))], IIC_MOVSX>,
                    TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))], IIC_MOVSX>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))], IIC_MOVSX>,
                    TB;
// 32->64 sign extension: movslq/movsxd, a one-byte opcode with no TB escape.
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))], IIC_MOVSX>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))], IIC_MOVSX>;

// movzbq and movzwq encodings for the disassembler only; no selection
// patterns, since codegen uses the smaller 32-bit-destination pseudos below.
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
                      TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
                      TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
                       TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
                       TB;

// FIXME: These should be Pat patterns.
// Codegen-only pseudos: empty asm strings, never printed or encoded as-is.
let isCodeGenOnly = 1 in {

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))], IIC_MOVZX>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))], IIC_MOVZX>,
                   TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))], IIC_MOVZX>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))],
                   IIC_MOVZX>, TB;

// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions. (Hence the mov opcodes 0x89/0x8B here, not movzx 0x0F B6/B7.)
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))], IIC_MOVZX>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))],
                    IIC_MOVZX>;
}