//===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
struct X86Operand;

/// X86AsmParser - Target-specific assembly parser for X86.  Handles both
/// AT&T and Intel syntax (selected via the parser's assembler dialect) and
/// feeds parsed operands to the tablegen-generated matcher included below.
class X86AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  ParseInstructionInfo *InstInfo;
private:
  MCAsmParser &getParser() const { return Parser; }

  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Report an error at L unless we are matching MS-style inline asm, in
  // which case diagnostics are suppressed (the caller handles retries).
  bool Error(SMLoc L, const Twine &Msg,
             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
             bool MatchingInlineAsm = false) {
    if (MatchingInlineAsm) return true;
    return Parser.Error(L, Msg, Ranges);
  }

  // Convenience wrapper for operand parsers: emit an error and return a
  // null operand.
  X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) {
    Error(Loc, Msg);
    return 0;
  }

  X86Operand *ParseOperand();
  X86Operand *ParseATTOperand();
  X86Operand *ParseIntelOperand();
  X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc);
  X86Operand *ParseIntelOperator(SMLoc StartLoc, unsigned OpKind);
  X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc);
  X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size);
  X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);

  bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp,
                             SmallString<64> &Err);

  bool ParseDirectiveWord(unsigned Size, SMLoc L);
  bool ParseDirectiveCode(StringRef IDVal, SMLoc L);

  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  /// isSrcOp - Returns true if operand is either (%rsi) or %ds:%(rsi)
  /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
  bool isSrcOp(X86Operand &Op);

  /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi)
  /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
  bool isDstOp(X86Operand &Op);

  bool is64BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
  }
  // Flip the 64-bit mode feature bit and recompute the available-feature
  // mask used by the generated matcher.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit));
    setAvailableFeatures(FB);
  }

  /// @name Auto-generated Matcher Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "X86GenAsmMatcher.inc"

  /// }

public:
  X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
    : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }
  virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);

  virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                SMLoc NameLoc,
                                SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  virtual bool ParseDirective(AsmToken DirectiveID);

  // Dialect 0 is AT&T syntax; any nonzero dialect is treated as Intel.
  bool isParsingIntelSyntax() {
    return getParser().getAssemblerDialect();
  }
};
} // end anonymous namespace

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

// The predicates below classify a 64-bit immediate bit pattern.  Each one
// accepts a value if it fits the named narrower encoding, allowing both the
// zero-extended and the sign-extended 64-bit representations of in-range
// values (e.g. -1 may arrive as 0xFFFF or as 0xFFFF...FFFF).

// Fits a sign-extended 8-bit immediate used in a 16-bit context.
static bool isImmSExti16i8Value(uint64_t Value) {
  return ((                                  Value <= 0x000000000000007FULL)||
          (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
          (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}

// Fits a sign-extended 8-bit immediate used in a 32-bit context.
static bool isImmSExti32i8Value(uint64_t Value) {
  return ((                                  Value <= 0x000000000000007FULL)||
          (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
          (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}

// Fits a zero-extended 8-bit immediate (0..255).
static bool isImmZExtu32u8Value(uint64_t Value) {
  return (Value <= 0x00000000000000FFULL);
}

// Fits a sign-extended 8-bit immediate used in a 64-bit context.
static bool isImmSExti64i8Value(uint64_t Value) {
  return ((                                  Value <= 0x000000000000007FULL)||
          (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}

// Fits a sign-extended 32-bit immediate used in a 64-bit context.
static bool isImmSExti64i32Value(uint64_t Value) {
  return ((                                  Value <= 0x000000007FFFFFFFULL)||
          (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
}
namespace {

/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
161 struct X86Operand : public MCParsedAsmOperand { 162 enum KindTy { 163 Token, 164 Register, 165 Immediate, 166 Memory 167 } Kind; 168 169 SMLoc StartLoc, EndLoc; 170 SMLoc OffsetOfLoc; 171 bool AddressOf; 172 173 struct TokOp { 174 const char *Data; 175 unsigned Length; 176 }; 177 178 struct RegOp { 179 unsigned RegNo; 180 }; 181 182 struct ImmOp { 183 const MCExpr *Val; 184 bool NeedAsmRewrite; 185 }; 186 187 struct MemOp { 188 unsigned SegReg; 189 const MCExpr *Disp; 190 unsigned BaseReg; 191 unsigned IndexReg; 192 unsigned Scale; 193 unsigned Size; 194 bool NeedSizeDir; 195 }; 196 197 union { 198 struct TokOp Tok; 199 struct RegOp Reg; 200 struct ImmOp Imm; 201 struct MemOp Mem; 202 }; 203 204 X86Operand(KindTy K, SMLoc Start, SMLoc End) 205 : Kind(K), StartLoc(Start), EndLoc(End) {} 206 207 /// getStartLoc - Get the location of the first token of this operand. 208 SMLoc getStartLoc() const { return StartLoc; } 209 /// getEndLoc - Get the location of the last token of this operand. 210 SMLoc getEndLoc() const { return EndLoc; } 211 /// getLocRange - Get the range between the first and last token of this 212 /// operand. 213 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } 214 /// getOffsetOfLoc - Get the location of the offset operator. 
215 SMLoc getOffsetOfLoc() const { return OffsetOfLoc; } 216 217 virtual void print(raw_ostream &OS) const {} 218 219 StringRef getToken() const { 220 assert(Kind == Token && "Invalid access!"); 221 return StringRef(Tok.Data, Tok.Length); 222 } 223 void setTokenValue(StringRef Value) { 224 assert(Kind == Token && "Invalid access!"); 225 Tok.Data = Value.data(); 226 Tok.Length = Value.size(); 227 } 228 229 unsigned getReg() const { 230 assert(Kind == Register && "Invalid access!"); 231 return Reg.RegNo; 232 } 233 234 const MCExpr *getImm() const { 235 assert(Kind == Immediate && "Invalid access!"); 236 return Imm.Val; 237 } 238 239 bool needAsmRewrite() const { 240 assert(Kind == Immediate && "Invalid access!"); 241 return Imm.NeedAsmRewrite; 242 } 243 244 const MCExpr *getMemDisp() const { 245 assert(Kind == Memory && "Invalid access!"); 246 return Mem.Disp; 247 } 248 unsigned getMemSegReg() const { 249 assert(Kind == Memory && "Invalid access!"); 250 return Mem.SegReg; 251 } 252 unsigned getMemBaseReg() const { 253 assert(Kind == Memory && "Invalid access!"); 254 return Mem.BaseReg; 255 } 256 unsigned getMemIndexReg() const { 257 assert(Kind == Memory && "Invalid access!"); 258 return Mem.IndexReg; 259 } 260 unsigned getMemScale() const { 261 assert(Kind == Memory && "Invalid access!"); 262 return Mem.Scale; 263 } 264 265 bool isToken() const {return Kind == Token; } 266 267 bool isImm() const { return Kind == Immediate; } 268 269 bool isImmSExti16i8() const { 270 if (!isImm()) 271 return false; 272 273 // If this isn't a constant expr, just assume it fits and let relaxation 274 // handle it. 275 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 276 if (!CE) 277 return true; 278 279 // Otherwise, check the value is in a range that makes sense for this 280 // extension. 
281 return isImmSExti16i8Value(CE->getValue()); 282 } 283 bool isImmSExti32i8() const { 284 if (!isImm()) 285 return false; 286 287 // If this isn't a constant expr, just assume it fits and let relaxation 288 // handle it. 289 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 290 if (!CE) 291 return true; 292 293 // Otherwise, check the value is in a range that makes sense for this 294 // extension. 295 return isImmSExti32i8Value(CE->getValue()); 296 } 297 bool isImmZExtu32u8() const { 298 if (!isImm()) 299 return false; 300 301 // If this isn't a constant expr, just assume it fits and let relaxation 302 // handle it. 303 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 304 if (!CE) 305 return true; 306 307 // Otherwise, check the value is in a range that makes sense for this 308 // extension. 309 return isImmZExtu32u8Value(CE->getValue()); 310 } 311 bool isImmSExti64i8() const { 312 if (!isImm()) 313 return false; 314 315 // If this isn't a constant expr, just assume it fits and let relaxation 316 // handle it. 317 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 318 if (!CE) 319 return true; 320 321 // Otherwise, check the value is in a range that makes sense for this 322 // extension. 323 return isImmSExti64i8Value(CE->getValue()); 324 } 325 bool isImmSExti64i32() const { 326 if (!isImm()) 327 return false; 328 329 // If this isn't a constant expr, just assume it fits and let relaxation 330 // handle it. 331 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 332 if (!CE) 333 return true; 334 335 // Otherwise, check the value is in a range that makes sense for this 336 // extension. 
337 return isImmSExti64i32Value(CE->getValue()); 338 } 339 340 unsigned getMemSize() const { 341 assert(Kind == Memory && "Invalid access!"); 342 return Mem.Size; 343 } 344 345 bool isOffsetOf() const { 346 return OffsetOfLoc.getPointer(); 347 } 348 349 bool needAddressOf() const { 350 return AddressOf; 351 } 352 353 bool needSizeDirective() const { 354 assert(Kind == Memory && "Invalid access!"); 355 return Mem.NeedSizeDir; 356 } 357 358 bool isMem() const { return Kind == Memory; } 359 bool isMem8() const { 360 return Kind == Memory && (!Mem.Size || Mem.Size == 8); 361 } 362 bool isMem16() const { 363 return Kind == Memory && (!Mem.Size || Mem.Size == 16); 364 } 365 bool isMem32() const { 366 return Kind == Memory && (!Mem.Size || Mem.Size == 32); 367 } 368 bool isMem64() const { 369 return Kind == Memory && (!Mem.Size || Mem.Size == 64); 370 } 371 bool isMem80() const { 372 return Kind == Memory && (!Mem.Size || Mem.Size == 80); 373 } 374 bool isMem128() const { 375 return Kind == Memory && (!Mem.Size || Mem.Size == 128); 376 } 377 bool isMem256() const { 378 return Kind == Memory && (!Mem.Size || Mem.Size == 256); 379 } 380 381 bool isMemVX32() const { 382 return Kind == Memory && (!Mem.Size || Mem.Size == 32) && 383 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; 384 } 385 bool isMemVY32() const { 386 return Kind == Memory && (!Mem.Size || Mem.Size == 32) && 387 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; 388 } 389 bool isMemVX64() const { 390 return Kind == Memory && (!Mem.Size || Mem.Size == 64) && 391 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; 392 } 393 bool isMemVY64() const { 394 return Kind == Memory && (!Mem.Size || Mem.Size == 64) && 395 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; 396 } 397 398 bool isAbsMem() const { 399 return Kind == Memory && !getMemSegReg() && !getMemBaseReg() && 400 !getMemIndexReg() && getMemScale() == 1; 401 } 402 403 bool isReg() const { return 
Kind == Register; } 404 405 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 406 // Add as immediates when possible. 407 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 408 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 409 else 410 Inst.addOperand(MCOperand::CreateExpr(Expr)); 411 } 412 413 void addRegOperands(MCInst &Inst, unsigned N) const { 414 assert(N == 1 && "Invalid number of operands!"); 415 Inst.addOperand(MCOperand::CreateReg(getReg())); 416 } 417 418 void addImmOperands(MCInst &Inst, unsigned N) const { 419 assert(N == 1 && "Invalid number of operands!"); 420 addExpr(Inst, getImm()); 421 } 422 423 void addMem8Operands(MCInst &Inst, unsigned N) const { 424 addMemOperands(Inst, N); 425 } 426 void addMem16Operands(MCInst &Inst, unsigned N) const { 427 addMemOperands(Inst, N); 428 } 429 void addMem32Operands(MCInst &Inst, unsigned N) const { 430 addMemOperands(Inst, N); 431 } 432 void addMem64Operands(MCInst &Inst, unsigned N) const { 433 addMemOperands(Inst, N); 434 } 435 void addMem80Operands(MCInst &Inst, unsigned N) const { 436 addMemOperands(Inst, N); 437 } 438 void addMem128Operands(MCInst &Inst, unsigned N) const { 439 addMemOperands(Inst, N); 440 } 441 void addMem256Operands(MCInst &Inst, unsigned N) const { 442 addMemOperands(Inst, N); 443 } 444 void addMemVX32Operands(MCInst &Inst, unsigned N) const { 445 addMemOperands(Inst, N); 446 } 447 void addMemVY32Operands(MCInst &Inst, unsigned N) const { 448 addMemOperands(Inst, N); 449 } 450 void addMemVX64Operands(MCInst &Inst, unsigned N) const { 451 addMemOperands(Inst, N); 452 } 453 void addMemVY64Operands(MCInst &Inst, unsigned N) const { 454 addMemOperands(Inst, N); 455 } 456 457 void addMemOperands(MCInst &Inst, unsigned N) const { 458 assert((N == 5) && "Invalid number of operands!"); 459 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg())); 460 Inst.addOperand(MCOperand::CreateImm(getMemScale())); 461 Inst.addOperand(MCOperand::CreateReg(getMemIndexReg())); 462 
addExpr(Inst, getMemDisp()); 463 Inst.addOperand(MCOperand::CreateReg(getMemSegReg())); 464 } 465 466 void addAbsMemOperands(MCInst &Inst, unsigned N) const { 467 assert((N == 1) && "Invalid number of operands!"); 468 // Add as immediates when possible. 469 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp())) 470 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 471 else 472 Inst.addOperand(MCOperand::CreateExpr(getMemDisp())); 473 } 474 475 static X86Operand *CreateToken(StringRef Str, SMLoc Loc) { 476 SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size()); 477 X86Operand *Res = new X86Operand(Token, Loc, EndLoc); 478 Res->Tok.Data = Str.data(); 479 Res->Tok.Length = Str.size(); 480 return Res; 481 } 482 483 static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, 484 bool AddressOf = false, 485 SMLoc OffsetOfLoc = SMLoc()) { 486 X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc); 487 Res->Reg.RegNo = RegNo; 488 Res->AddressOf = AddressOf; 489 Res->OffsetOfLoc = OffsetOfLoc; 490 return Res; 491 } 492 493 static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc, 494 bool NeedRewrite = true){ 495 X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc); 496 Res->Imm.Val = Val; 497 Res->Imm.NeedAsmRewrite = NeedRewrite; 498 return Res; 499 } 500 501 /// Create an absolute memory operand. 502 static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, 503 unsigned Size = 0, bool NeedSizeDir = false) { 504 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); 505 Res->Mem.SegReg = 0; 506 Res->Mem.Disp = Disp; 507 Res->Mem.BaseReg = 0; 508 Res->Mem.IndexReg = 0; 509 Res->Mem.Scale = 1; 510 Res->Mem.Size = Size; 511 Res->Mem.NeedSizeDir = NeedSizeDir; 512 Res->AddressOf = false; 513 return Res; 514 } 515 516 /// Create a generalized memory operand. 
517 static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp, 518 unsigned BaseReg, unsigned IndexReg, 519 unsigned Scale, SMLoc StartLoc, SMLoc EndLoc, 520 unsigned Size = 0, bool NeedSizeDir = false) { 521 // We should never just have a displacement, that should be parsed as an 522 // absolute memory operand. 523 assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!"); 524 525 // The scale should always be one of {1,2,4,8}. 526 assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) && 527 "Invalid scale!"); 528 X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); 529 Res->Mem.SegReg = SegReg; 530 Res->Mem.Disp = Disp; 531 Res->Mem.BaseReg = BaseReg; 532 Res->Mem.IndexReg = IndexReg; 533 Res->Mem.Scale = Scale; 534 Res->Mem.Size = Size; 535 Res->Mem.NeedSizeDir = NeedSizeDir; 536 Res->AddressOf = false; 537 return Res; 538 } 539 }; 540 541 } // end anonymous namespace. 542 543 bool X86AsmParser::isSrcOp(X86Operand &Op) { 544 unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI; 545 546 return (Op.isMem() && 547 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) && 548 isa<MCConstantExpr>(Op.Mem.Disp) && 549 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 && 550 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0); 551 } 552 553 bool X86AsmParser::isDstOp(X86Operand &Op) { 554 unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI; 555 556 return Op.isMem() && 557 (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) && 558 isa<MCConstantExpr>(Op.Mem.Disp) && 559 cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 && 560 Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0; 561 } 562 563 bool X86AsmParser::ParseRegister(unsigned &RegNo, 564 SMLoc &StartLoc, SMLoc &EndLoc) { 565 RegNo = 0; 566 const AsmToken &PercentTok = Parser.getTok(); 567 StartLoc = PercentTok.getLoc(); 568 569 // If we encounter a %, ignore it. This code handles registers with and 570 // without the prefix, unprefixed registers can occur in cfi directives. 
571 if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent)) 572 Parser.Lex(); // Eat percent token. 573 574 const AsmToken &Tok = Parser.getTok(); 575 EndLoc = Tok.getEndLoc(); 576 577 if (Tok.isNot(AsmToken::Identifier)) { 578 if (isParsingIntelSyntax()) return true; 579 return Error(StartLoc, "invalid register name", 580 SMRange(StartLoc, EndLoc)); 581 } 582 583 RegNo = MatchRegisterName(Tok.getString()); 584 585 // If the match failed, try the register name as lowercase. 586 if (RegNo == 0) 587 RegNo = MatchRegisterName(Tok.getString().lower()); 588 589 if (!is64BitMode()) { 590 // FIXME: This should be done using Requires<In32BitMode> and 591 // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also 592 // checked. 593 // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a 594 // REX prefix. 595 if (RegNo == X86::RIZ || 596 X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) || 597 X86II::isX86_64NonExtLowByteReg(RegNo) || 598 X86II::isX86_64ExtendedReg(RegNo)) 599 return Error(StartLoc, "register %" 600 + Tok.getString() + " is only available in 64-bit mode", 601 SMRange(StartLoc, EndLoc)); 602 } 603 604 // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens. 605 if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) { 606 RegNo = X86::ST0; 607 Parser.Lex(); // Eat 'st' 608 609 // Check to see if we have '(4)' after %st. 610 if (getLexer().isNot(AsmToken::LParen)) 611 return false; 612 // Lex the paren. 
613 getParser().Lex(); 614 615 const AsmToken &IntTok = Parser.getTok(); 616 if (IntTok.isNot(AsmToken::Integer)) 617 return Error(IntTok.getLoc(), "expected stack index"); 618 switch (IntTok.getIntVal()) { 619 case 0: RegNo = X86::ST0; break; 620 case 1: RegNo = X86::ST1; break; 621 case 2: RegNo = X86::ST2; break; 622 case 3: RegNo = X86::ST3; break; 623 case 4: RegNo = X86::ST4; break; 624 case 5: RegNo = X86::ST5; break; 625 case 6: RegNo = X86::ST6; break; 626 case 7: RegNo = X86::ST7; break; 627 default: return Error(IntTok.getLoc(), "invalid stack index"); 628 } 629 630 if (getParser().Lex().isNot(AsmToken::RParen)) 631 return Error(Parser.getTok().getLoc(), "expected ')'"); 632 633 EndLoc = Parser.getTok().getEndLoc(); 634 Parser.Lex(); // Eat ')' 635 return false; 636 } 637 638 EndLoc = Parser.getTok().getEndLoc(); 639 640 // If this is "db[0-7]", match it as an alias 641 // for dr[0-7]. 642 if (RegNo == 0 && Tok.getString().size() == 3 && 643 Tok.getString().startswith("db")) { 644 switch (Tok.getString()[2]) { 645 case '0': RegNo = X86::DR0; break; 646 case '1': RegNo = X86::DR1; break; 647 case '2': RegNo = X86::DR2; break; 648 case '3': RegNo = X86::DR3; break; 649 case '4': RegNo = X86::DR4; break; 650 case '5': RegNo = X86::DR5; break; 651 case '6': RegNo = X86::DR6; break; 652 case '7': RegNo = X86::DR7; break; 653 } 654 655 if (RegNo != 0) { 656 EndLoc = Parser.getTok().getEndLoc(); 657 Parser.Lex(); // Eat it. 658 return false; 659 } 660 } 661 662 if (RegNo == 0) { 663 if (isParsingIntelSyntax()) return true; 664 return Error(StartLoc, "invalid register name", 665 SMRange(StartLoc, EndLoc)); 666 } 667 668 Parser.Lex(); // Eat identifier token. 669 return false; 670 } 671 672 X86Operand *X86AsmParser::ParseOperand() { 673 if (isParsingIntelSyntax()) 674 return ParseIntelOperand(); 675 return ParseATTOperand(); 676 } 677 678 /// getIntelMemOperandSize - Return intel memory operand size. 
679 static unsigned getIntelMemOperandSize(StringRef OpStr) { 680 unsigned Size = StringSwitch<unsigned>(OpStr) 681 .Cases("BYTE", "byte", 8) 682 .Cases("WORD", "word", 16) 683 .Cases("DWORD", "dword", 32) 684 .Cases("QWORD", "qword", 64) 685 .Cases("XWORD", "xword", 80) 686 .Cases("XMMWORD", "xmmword", 128) 687 .Cases("YMMWORD", "ymmword", 256) 688 .Default(0); 689 return Size; 690 } 691 692 enum IntelBracExprState { 693 IBES_START, 694 IBES_LBRAC, 695 IBES_RBRAC, 696 IBES_REGISTER, 697 IBES_REGISTER_STAR, 698 IBES_REGISTER_STAR_INTEGER, 699 IBES_INTEGER, 700 IBES_INTEGER_STAR, 701 IBES_INDEX_REGISTER, 702 IBES_IDENTIFIER, 703 IBES_DISP_EXPR, 704 IBES_MINUS, 705 IBES_ERROR 706 }; 707 708 class IntelBracExprStateMachine { 709 IntelBracExprState State; 710 unsigned BaseReg, IndexReg, Scale; 711 int64_t Disp; 712 713 unsigned TmpReg; 714 int64_t TmpInteger; 715 716 bool isPlus; 717 718 public: 719 IntelBracExprStateMachine(MCAsmParser &parser) : 720 State(IBES_START), BaseReg(0), IndexReg(0), Scale(1), Disp(0), 721 TmpReg(0), TmpInteger(0), isPlus(true) {} 722 723 unsigned getBaseReg() { return BaseReg; } 724 unsigned getIndexReg() { return IndexReg; } 725 unsigned getScale() { return Scale; } 726 int64_t getDisp() { return Disp; } 727 bool isValidEndState() { return State == IBES_RBRAC; } 728 729 void onPlus() { 730 switch (State) { 731 default: 732 State = IBES_ERROR; 733 break; 734 case IBES_INTEGER: 735 State = IBES_START; 736 if (isPlus) 737 Disp += TmpInteger; 738 else 739 Disp -= TmpInteger; 740 break; 741 case IBES_REGISTER: 742 State = IBES_START; 743 // If we already have a BaseReg, then assume this is the IndexReg with a 744 // scale of 1. 
745 if (!BaseReg) { 746 BaseReg = TmpReg; 747 } else { 748 assert (!IndexReg && "BaseReg/IndexReg already set!"); 749 IndexReg = TmpReg; 750 Scale = 1; 751 } 752 break; 753 case IBES_INDEX_REGISTER: 754 State = IBES_START; 755 break; 756 } 757 isPlus = true; 758 } 759 void onMinus() { 760 switch (State) { 761 default: 762 State = IBES_ERROR; 763 break; 764 case IBES_START: 765 State = IBES_MINUS; 766 break; 767 case IBES_INTEGER: 768 State = IBES_START; 769 if (isPlus) 770 Disp += TmpInteger; 771 else 772 Disp -= TmpInteger; 773 break; 774 case IBES_REGISTER: 775 State = IBES_START; 776 // If we already have a BaseReg, then assume this is the IndexReg with a 777 // scale of 1. 778 if (!BaseReg) { 779 BaseReg = TmpReg; 780 } else { 781 assert (!IndexReg && "BaseReg/IndexReg already set!"); 782 IndexReg = TmpReg; 783 Scale = 1; 784 } 785 break; 786 case IBES_INDEX_REGISTER: 787 State = IBES_START; 788 break; 789 } 790 isPlus = false; 791 } 792 void onRegister(unsigned Reg) { 793 switch (State) { 794 default: 795 State = IBES_ERROR; 796 break; 797 case IBES_START: 798 State = IBES_REGISTER; 799 TmpReg = Reg; 800 break; 801 case IBES_INTEGER_STAR: 802 assert (!IndexReg && "IndexReg already set!"); 803 State = IBES_INDEX_REGISTER; 804 IndexReg = Reg; 805 Scale = TmpInteger; 806 break; 807 } 808 } 809 void onDispExpr() { 810 switch (State) { 811 default: 812 State = IBES_ERROR; 813 break; 814 case IBES_START: 815 State = IBES_DISP_EXPR; 816 break; 817 } 818 } 819 void onInteger(int64_t TmpInt) { 820 switch (State) { 821 default: 822 State = IBES_ERROR; 823 break; 824 case IBES_START: 825 State = IBES_INTEGER; 826 TmpInteger = TmpInt; 827 break; 828 case IBES_MINUS: 829 State = IBES_INTEGER; 830 TmpInteger = TmpInt; 831 break; 832 case IBES_REGISTER_STAR: 833 assert (!IndexReg && "IndexReg already set!"); 834 State = IBES_INDEX_REGISTER; 835 IndexReg = TmpReg; 836 Scale = TmpInt; 837 break; 838 } 839 } 840 void onStar() { 841 switch (State) { 842 default: 843 State = 
IBES_ERROR; 844 break; 845 case IBES_INTEGER: 846 State = IBES_INTEGER_STAR; 847 break; 848 case IBES_REGISTER: 849 State = IBES_REGISTER_STAR; 850 break; 851 } 852 } 853 void onLBrac() { 854 switch (State) { 855 default: 856 State = IBES_ERROR; 857 break; 858 case IBES_RBRAC: 859 State = IBES_START; 860 isPlus = true; 861 break; 862 } 863 } 864 void onRBrac() { 865 switch (State) { 866 default: 867 State = IBES_ERROR; 868 break; 869 case IBES_DISP_EXPR: 870 State = IBES_RBRAC; 871 break; 872 case IBES_INTEGER: 873 State = IBES_RBRAC; 874 if (isPlus) 875 Disp += TmpInteger; 876 else 877 Disp -= TmpInteger; 878 break; 879 case IBES_REGISTER: 880 State = IBES_RBRAC; 881 // If we already have a BaseReg, then assume this is the IndexReg with a 882 // scale of 1. 883 if (!BaseReg) { 884 BaseReg = TmpReg; 885 } else { 886 assert (!IndexReg && "BaseReg/IndexReg already set!"); 887 IndexReg = TmpReg; 888 Scale = 1; 889 } 890 break; 891 case IBES_INDEX_REGISTER: 892 State = IBES_RBRAC; 893 break; 894 } 895 } 896 }; 897 898 X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, 899 unsigned Size) { 900 const AsmToken &Tok = Parser.getTok(); 901 SMLoc Start = Tok.getLoc(), End = Tok.getEndLoc(); 902 903 // Eat '[' 904 if (getLexer().isNot(AsmToken::LBrac)) 905 return ErrorOperand(Start, "Expected '[' token!"); 906 Parser.Lex(); 907 908 unsigned TmpReg = 0; 909 910 // Try to handle '[' 'symbol' ']' 911 if (getLexer().is(AsmToken::Identifier)) { 912 if (ParseRegister(TmpReg, Start, End)) { 913 const MCExpr *Disp; 914 if (getParser().parseExpression(Disp, End)) 915 return 0; 916 917 if (getLexer().isNot(AsmToken::RBrac)) 918 return ErrorOperand(Parser.getTok().getLoc(), "Expected ']' token!"); 919 // Adjust the EndLoc due to the ']'. 920 End = SMLoc::getFromPointer(Parser.getTok().getEndLoc().getPointer()-1); 921 Parser.Lex(); 922 return X86Operand::CreateMem(Disp, Start, End, Size); 923 } 924 } 925 926 // Parse [ BaseReg + Scale*IndexReg + Disp ]. 
927 bool Done = false; 928 IntelBracExprStateMachine SM(Parser); 929 930 // If we parsed a register, then the end loc has already been set and 931 // the identifier has already been lexed. We also need to update the 932 // state. 933 if (TmpReg) 934 SM.onRegister(TmpReg); 935 936 const MCExpr *Disp = 0; 937 while (!Done) { 938 bool UpdateLocLex = true; 939 940 // The period in the dot operator (e.g., [ebx].foo.bar) is parsed as an 941 // identifier. Don't try an parse it as a register. 942 if (Tok.getString().startswith(".")) 943 break; 944 945 switch (getLexer().getKind()) { 946 default: { 947 if (SM.isValidEndState()) { 948 Done = true; 949 break; 950 } 951 return ErrorOperand(Tok.getLoc(), "Unexpected token!"); 952 } 953 case AsmToken::Identifier: { 954 // This could be a register or a displacement expression. 955 if(!ParseRegister(TmpReg, Start, End)) { 956 SM.onRegister(TmpReg); 957 UpdateLocLex = false; 958 break; 959 } else if (!getParser().parseExpression(Disp, End)) { 960 SM.onDispExpr(); 961 UpdateLocLex = false; 962 break; 963 } 964 return ErrorOperand(Tok.getLoc(), "Unexpected identifier!"); 965 } 966 case AsmToken::Integer: { 967 int64_t Val = Tok.getIntVal(); 968 SM.onInteger(Val); 969 break; 970 } 971 case AsmToken::Plus: SM.onPlus(); break; 972 case AsmToken::Minus: SM.onMinus(); break; 973 case AsmToken::Star: SM.onStar(); break; 974 case AsmToken::LBrac: SM.onLBrac(); break; 975 case AsmToken::RBrac: SM.onRBrac(); break; 976 } 977 if (!Done && UpdateLocLex) { 978 End = Tok.getLoc(); 979 Parser.Lex(); // Consume the token. 980 } 981 } 982 983 if (!Disp) 984 Disp = MCConstantExpr::Create(SM.getDisp(), getContext()); 985 986 // Parse the dot operator (e.g., [ebx].foo.bar). 987 if (Tok.getString().startswith(".")) { 988 SmallString<64> Err; 989 const MCExpr *NewDisp; 990 if (ParseIntelDotOperator(Disp, &NewDisp, Err)) 991 return ErrorOperand(Tok.getLoc(), Err); 992 993 End = Parser.getTok().getEndLoc(); 994 Parser.Lex(); // Eat the field. 
    Disp = NewDisp;
  }

  // Pull out whatever base/index registers the bracketed-expression state
  // machine (SM) accumulated.
  int BaseReg = SM.getBaseReg();
  int IndexReg = SM.getIndexReg();

  // Handle a displacement-only reference, e.g. [-42].
  if (!BaseReg && !IndexReg) {
    if (!SegReg)
      return X86Operand::CreateMem(Disp, Start, End);
    else
      return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
  }

  int Scale = SM.getScale();
  return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
                               Start, End, Size);
}

/// ParseIntelMemOperand - Parse intel style memory operand.  Handles an
/// optional size directive ("dword ptr" etc.), a leading segment override,
/// bracketed expressions, and (when parsing MS inline asm) bare identifiers
/// that the frontend will resolve via the Sema callback.
X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) {
  // NOTE: Tok is a reference to the lexer's current token, so it observes the
  // new token after each Parser.Lex(); the assert below relies on this.
  const AsmToken &Tok = Parser.getTok();
  SMLoc End;

  // Consume an optional operand-size directive, e.g. "byte/word/dword PTR".
  unsigned Size = getIntelMemOperandSize(Tok.getString());
  if (Size) {
    Parser.Lex(); // Eat the size keyword.
    assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") &&
            "Unexpected token!");
    Parser.Lex(); // Eat "PTR".
  }

  if (getLexer().is(AsmToken::LBrac))
    return ParseIntelBracExpression(SegReg, Size);

  if (!ParseRegister(SegReg, Start, End)) {
    // Handle SegReg : [ ... ]
    if (getLexer().isNot(AsmToken::Colon))
      return ErrorOperand(Start, "Expected ':' token!");
    Parser.Lex(); // Eat :
    if (getLexer().isNot(AsmToken::LBrac))
      return ErrorOperand(Start, "Expected '[' token!");
    return ParseIntelBracExpression(SegReg, Size);
  }

  // No register: parse a displacement expression (constant or symbol).
  const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
  if (getParser().parseExpression(Disp, End))
    return 0;

  bool NeedSizeDir = false;
  bool IsVarDecl = false;
  if (isParsingInlineAsm()) {
    if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Disp)) {
      const MCSymbol &Sym = SymRef->getSymbol();
      // FIXME: The SemaLookup will fail if the name is anything other than an
      // identifier.
      // FIXME: Pass a valid SMLoc.
      unsigned tLength, tSize, tType;
      SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, tLength,
                                              tSize, tType, IsVarDecl);
      if (!Size)
        Size = tType * 8; // Size is in terms of bits in this context.
      NeedSizeDir = Size > 0;
    }
  }
  if (!isParsingInlineAsm())
    return X86Operand::CreateMem(Disp, Start, End, Size);
  else {
    // If this is not a VarDecl then assume it is a FuncDecl or some other label
    // reference.  We need an 'r' constraint here, so we need to create register
    // operand to ensure proper matching.  Just pick a GPR based on the size of
    // a pointer.
    if (!IsVarDecl) {
      unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
      return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true);
    }

    // When parsing inline assembly we set the base register to a non-zero value
    // as we don't know the actual value at this time.  This is necessary to
    // get the matching correct in some cases.
    return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0,
                                 /*Scale*/1, Start, End, Size, NeedSizeDir);
  }
}

/// Parse the '.' operator, e.g. the ".bar" in "[eax].bar", folding the field
/// offset into the displacement.  Returns true on error and sets Err; on
/// success *NewDisp holds the combined constant displacement.
bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
                                         const MCExpr **NewDisp,
                                         SmallString<64> &Err) {
  // Copy the current token ('*&' dereference-of-address is historical noise).
  AsmToken Tok = *&Parser.getTok();
  uint64_t OrigDispVal, DotDispVal;

  // FIXME: Handle non-constant expressions.
  if (const MCConstantExpr *OrigDisp = dyn_cast<MCConstantExpr>(Disp)) {
    OrigDispVal = OrigDisp->getValue();
  } else {
    Err = "Non-constant offsets are not supported!";
    return true;
  }

  // Drop the '.'.
  StringRef DotDispStr = Tok.getString().drop_front(1);

  // .Imm gets lexed as a real, so retrieve the digits after the '.'.
  if (Tok.is(AsmToken::Real)) {
    APInt DotDisp;
    DotDispStr.getAsInteger(10, DotDisp);
    DotDispVal = DotDisp.getZExtValue();
  } else if (Tok.is(AsmToken::Identifier)) {
    // We should only see an identifier when parsing the original inline asm.
    // The front-end should rewrite this in terms of immediates.
    assert (isParsingInlineAsm() && "Unexpected field name!");

    unsigned DotDisp;
    std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
    if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
                                           DotDisp)) {
      Err = "Unable to lookup field reference!";
      return true;
    }
    DotDispVal = DotDisp;
  } else {
    Err = "Unexpected token type!";
    return true;
  }

  // For inline asm, record a rewrite so the frontend replaces the field name
  // with the resolved numeric offset.
  if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
    SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
    unsigned Len = DotDispStr.size();
    unsigned Val = OrigDispVal + DotDispVal;
    InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len,
                                                Val));
  }

  *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext());
  return false;
}

/// Parse the 'offset' operator.  This operator is used to specify the
/// location rather than the content of a variable.
X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) {
  SMLoc OffsetOfLoc = Start;
  Parser.Lex(); // Eat offset.
  Start = Parser.getTok().getLoc();
  assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");

  SMLoc End;
  const MCExpr *Val;
  if (getParser().parseExpression(Val, End))
    return ErrorOperand(Start, "Unable to parse expression!");

  // Don't emit the offset operator.
  // Skip over the "offset " text (7 characters) when the inline asm is
  // rewritten by the frontend.
  InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));

  // The offset operator will have an 'r' constraint, thus we need to create
  // register operand to ensure proper matching.  Just pick a GPR based on
  // the size of a pointer.
  unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
  return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
                               OffsetOfLoc);
}

// Discriminates the MASM-style LENGTH/SIZE/TYPE operators handled by
// ParseIntelOperator below.
enum IntelOperatorKind {
  IOK_LENGTH,
  IOK_SIZE,
  IOK_TYPE
};

/// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators.  The LENGTH operator
/// returns the number of elements in an array.  It returns the value 1 for
/// non-array variables.  The SIZE operator returns the size of a C or C++
/// variable.  A variable's size is the product of its LENGTH and TYPE.  The
/// TYPE operator returns the size of a C or C++ type or variable.  If the
/// variable is an array, TYPE returns the size of a single element.
X86Operand *X86AsmParser::ParseIntelOperator(SMLoc Start, unsigned OpKind) {
  SMLoc TypeLoc = Start;
  Parser.Lex(); // Eat the operator (length/size/type).
  Start = Parser.getTok().getLoc();
  assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier");

  SMLoc End;
  const MCExpr *Val;
  if (getParser().parseExpression(Val, End))
    return 0;

  // Resolve the identifier through the frontend callback to get its
  // length/size/type.  Only reached when parsing inline asm (see caller).
  unsigned Length = 0, Size = 0, Type = 0;
  if (const MCSymbolRefExpr *SymRef = dyn_cast<MCSymbolRefExpr>(Val)) {
    const MCSymbol &Sym = SymRef->getSymbol();
    // FIXME: The SemaLookup will fail if the name is anything other than an
    // identifier.
    // FIXME: Pass a valid SMLoc.
    bool IsVarDecl;
    if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Length,
                                                 Size, Type, IsVarDecl))
      return ErrorOperand(Start, "Unable to lookup expr!");
  }
  unsigned CVal;
  switch(OpKind) {
  default: llvm_unreachable("Unexpected operand kind!");
  case IOK_LENGTH: CVal = Length; break;
  case IOK_SIZE: CVal = Size; break;
  case IOK_TYPE: CVal = Type; break;
  }

  // Rewrite the type operator and the C or C++ type or variable in terms of an
  // immediate.  E.g. TYPE foo -> $$4
  unsigned Len = End.getPointer() - TypeLoc.getPointer();
  InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal));

  const MCExpr *Imm = MCConstantExpr::Create(CVal, getContext());
  return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false);
}

/// ParseIntelOperand - Top-level Intel-syntax operand parser: dispatches on
/// inline-asm operators, immediates, registers (possibly segment-prefixed
/// memory references), and plain memory operands.
X86Operand *X86AsmParser::ParseIntelOperand() {
  SMLoc Start = Parser.getTok().getLoc(), End;
  StringRef AsmTokStr = Parser.getTok().getString();

  // Offset, length, type and size operators (inline asm only).
  if (isParsingInlineAsm()) {
    if (AsmTokStr == "offset" || AsmTokStr == "OFFSET")
      return ParseIntelOffsetOfOperator(Start);
    if (AsmTokStr == "length" || AsmTokStr == "LENGTH")
      return ParseIntelOperator(Start, IOK_LENGTH);
    if (AsmTokStr == "size" || AsmTokStr == "SIZE")
      return ParseIntelOperator(Start, IOK_SIZE);
    if (AsmTokStr == "type" || AsmTokStr == "TYPE")
      return ParseIntelOperator(Start, IOK_TYPE);
  }

  // Immediate.
  if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
      getLexer().is(AsmToken::Minus)) {
    const MCExpr *Val;
    if (!getParser().parseExpression(Val, End)) {
      return X86Operand::CreateImm(Val, Start, End);
    }
  }

  // Register.
  unsigned RegNo = 0;
  if (!ParseRegister(RegNo, Start, End)) {
    // If this is a segment register followed by a ':', then this is the start
    // of a memory reference, otherwise this is a normal register reference.
    if (getLexer().isNot(AsmToken::Colon))
      return X86Operand::CreateReg(RegNo, Start, End);

    getParser().Lex(); // Eat the colon.
    return ParseIntelMemOperand(RegNo, Start);
  }

  // Memory operand.
  return ParseIntelMemOperand(0, Start);
}

/// ParseATTOperand - Top-level AT&T-syntax operand parser: '%' introduces a
/// register (or segment-prefixed memory), '$' an immediate, anything else a
/// memory operand.
X86Operand *X86AsmParser::ParseATTOperand() {
  switch (getLexer().getKind()) {
  default:
    // Parse a memory operand with no segment register.
    return ParseMemOperand(0, Parser.getTok().getLoc());
  case AsmToken::Percent: {
    // Read the register.
    unsigned RegNo;
    SMLoc Start, End;
    if (ParseRegister(RegNo, Start, End)) return 0;
    // The pseudo index registers are rejected here; they are only legal in
    // the index position (validated in ParseMemOperand).
    if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
      Error(Start, "%eiz and %riz can only be used as index registers",
            SMRange(Start, End));
      return 0;
    }

    // If this is a segment register followed by a ':', then this is the start
    // of a memory reference, otherwise this is a normal register reference.
    if (getLexer().isNot(AsmToken::Colon))
      return X86Operand::CreateReg(RegNo, Start, End);


    getParser().Lex(); // Eat the colon.
    return ParseMemOperand(RegNo, Start);
  }
  case AsmToken::Dollar: {
    // $42 -> immediate.
    SMLoc Start = Parser.getTok().getLoc(), End;
    Parser.Lex();
    const MCExpr *Val;
    if (getParser().parseExpression(Val, End))
      return 0;
    return X86Operand::CreateImm(Val, Start, End);
  }
  }
}

/// ParseMemOperand: segment: disp(basereg, indexreg, scale).  The '%ds:'
/// prefix has already been parsed if present.
X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {

  // We have to disambiguate a parenthesized expression "(4+5)" from the start
  // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)".
  // The only way to do this without lookahead is to eat the '(' and see what
  // is after it.
  const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
  if (getLexer().isNot(AsmToken::LParen)) {
    SMLoc ExprEnd;
    if (getParser().parseExpression(Disp, ExprEnd)) return 0;

    // After parsing the base expression we could either have a parenthesized
    // memory address or not.  If not, return now.  If so, eat the (.
    if (getLexer().isNot(AsmToken::LParen)) {
      // Unless we have a segment register, treat this as an immediate.
      if (SegReg == 0)
        return X86Operand::CreateMem(Disp, MemStart, ExprEnd);
      return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
    }

    // Eat the '('.
    Parser.Lex();
  } else {
    // Okay, we have a '('.  We don't know if this is an expression or not, but
    // so we have to eat the ( to see beyond it.
    SMLoc LParenLoc = Parser.getTok().getLoc();
    Parser.Lex(); // Eat the '('.

    if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) {
      // Nothing to do here, fall into the code below with the '(' part of the
      // memory operand consumed.
    } else {
      SMLoc ExprEnd;

      // It must be an parenthesized expression, parse it now.
      if (getParser().parseParenExpression(Disp, ExprEnd))
        return 0;

      // After parsing the base expression we could either have a parenthesized
      // memory address or not.  If not, return now.  If so, eat the (.
      if (getLexer().isNot(AsmToken::LParen)) {
        // Unless we have a segment register, treat this as an immediate.
        if (SegReg == 0)
          return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
        return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
      }

      // Eat the '('.
      Parser.Lex();
    }
  }

  // If we reached here, then we just ate the ( of the memory operand.  Process
  // the rest of the memory operand.
  unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
  SMLoc IndexLoc;

  if (getLexer().is(AsmToken::Percent)) {
    SMLoc StartLoc, EndLoc;
    if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
      Error(StartLoc, "eiz and riz can only be used as index registers",
            SMRange(StartLoc, EndLoc));
      return 0;
    }
  }

  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
    IndexLoc = Parser.getTok().getLoc();

    // Following the comma we should have either an index register, or a scale
    // value.  We don't support the latter form, but we want to parse it
    // correctly.
    //
    // Note that even though it would be completely consistent to support
    // syntax like "1(%eax,,1)", the assembler doesn't.  Use "eiz" or "riz"
    // for this.
    if (getLexer().is(AsmToken::Percent)) {
      SMLoc L;
      if (ParseRegister(IndexReg, L, L)) return 0;

      if (getLexer().isNot(AsmToken::RParen)) {
        // Parse the scale amount:
        //  ::= ',' [scale-expression]
        if (getLexer().isNot(AsmToken::Comma)) {
          Error(Parser.getTok().getLoc(),
                "expected comma in scale expression");
          return 0;
        }
        Parser.Lex(); // Eat the comma.

        if (getLexer().isNot(AsmToken::RParen)) {
          SMLoc Loc = Parser.getTok().getLoc();

          int64_t ScaleVal;
          if (getParser().parseAbsoluteExpression(ScaleVal)){
            Error(Loc, "expected scale expression");
            return 0;
          }

          // Validate the scale amount.
          if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
            Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
            return 0;
          }
          Scale = (unsigned)ScaleVal;
        }
      }
    } else if (getLexer().isNot(AsmToken::RParen)) {
      // A scale amount without an index register is parsed but ignored.
      SMLoc Loc = Parser.getTok().getLoc();

      int64_t Value;
      if (getParser().parseAbsoluteExpression(Value))
        return 0;

      if (Value != 1)
        Warning(Loc, "scale factor without index register is ignored");
      Scale = 1;
    }
  }

  // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
  if (getLexer().isNot(AsmToken::RParen)) {
    Error(Parser.getTok().getLoc(), "unexpected token in memory operand");
    return 0;
  }
  SMLoc MemEnd = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat the ')'.

  // If we have both a base register and an index register make sure they are
  // both 64-bit or 32-bit registers.
  // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
  if (BaseReg != 0 && IndexReg != 0) {
    // 64-bit base with a 16/32-bit index (other than riz) is invalid.
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
        IndexReg != X86::RIZ) {
      Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
      return 0;
    }
    // 32-bit base with a 16/64-bit index (other than eiz) is invalid.
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
        IndexReg != X86::EIZ){
      Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
      return 0;
    }
  }

  return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
                               MemStart, MemEnd);
}

/// ParseInstruction - Parse one instruction: patch up pseudo-mnemonics,
/// collect the operand list, and apply the various mnemonic-specific operand
/// transformations below.  Returns true on error.
bool X86AsmParser::
ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
                 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  InstInfo = &Info;
  StringRef PatchedName = Name;

  // FIXME: Hack to recognize setneb as setne.
  if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
      PatchedName != "setb" && PatchedName != "setnb")
    PatchedName = PatchedName.substr(0, Name.size()-1);

  // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.  The
  // comparison code becomes an extra immediate operand and the mnemonic is
  // rewritten to the plain cmp/vcmp form.
  const MCExpr *ExtraImmOp = 0;
  if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
      (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
       PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
    bool IsVCMP = PatchedName[0] == 'v';
    // Skip "vcmp" (4 chars) or "cmp" (3 chars) to reach the condition text.
    unsigned SSECCIdx = IsVCMP ? 4 : 3;
    unsigned SSEComparisonCode = StringSwitch<unsigned>(
      PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
      .Case("eq", 0x00)
      .Case("lt", 0x01)
      .Case("le", 0x02)
      .Case("unord", 0x03)
      .Case("neq", 0x04)
      .Case("nlt", 0x05)
      .Case("nle", 0x06)
      .Case("ord", 0x07)
      /* AVX only from here */
      .Case("eq_uq", 0x08)
      .Case("nge", 0x09)
      .Case("ngt", 0x0A)
      .Case("false", 0x0B)
      .Case("neq_oq", 0x0C)
      .Case("ge", 0x0D)
      .Case("gt", 0x0E)
      .Case("true", 0x0F)
      .Case("eq_os", 0x10)
      .Case("lt_oq", 0x11)
      .Case("le_oq", 0x12)
      .Case("unord_s", 0x13)
      .Case("neq_us", 0x14)
      .Case("nlt_uq", 0x15)
      .Case("nle_uq", 0x16)
      .Case("ord_s", 0x17)
      .Case("eq_us", 0x18)
      .Case("nge_uq", 0x19)
      .Case("ngt_uq", 0x1A)
      .Case("false_os", 0x1B)
      .Case("neq_os", 0x1C)
      .Case("ge_oq", 0x1D)
      .Case("gt_oq", 0x1E)
      .Case("true_us", 0x1F)
      .Default(~0U);
    // Codes >= 8 require the VEX (AVX) encoding, so only accept them on vcmp.
    if (SSEComparisonCode != ~0U && (IsVCMP || SSEComparisonCode < 8)) {
      ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
                                          getParser().getContext());
      if (PatchedName.endswith("ss")) {
        PatchedName = IsVCMP ? "vcmpss" : "cmpss";
      } else if (PatchedName.endswith("sd")) {
        PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
      } else if (PatchedName.endswith("ps")) {
        PatchedName = IsVCMP ? "vcmpps" : "cmpps";
      } else {
        assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
        PatchedName = IsVCMP ? "vcmppd" : "cmppd";
      }
    }
  }

  Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));

  // In AT&T syntax the comparison-code immediate comes first; in Intel syntax
  // it is appended after the other operands (see below).
  if (ExtraImmOp && !isParsingIntelSyntax())
    Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));

  // Determine whether this is an instruction prefix.
  bool isPrefix =
    Name == "lock" || Name == "rep" ||
    Name == "repe" || Name == "repz" ||
    Name == "repne" || Name == "repnz" ||
    Name == "rex64" || Name == "data16";


  // This does the actual operand parsing.  Don't parse any more if we have a
  // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
  // just want to parse the "lock" as the first instruction and the "incl" as
  // the next one.
  if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {

    // Parse '*' modifier.
    if (getLexer().is(AsmToken::Star)) {
      SMLoc Loc = Parser.getTok().getLoc();
      Operands.push_back(X86Operand::CreateToken("*", Loc));
      Parser.Lex(); // Eat the star.
    }

    // Read the first operand.
    if (X86Operand *Op = ParseOperand())
      Operands.push_back(Op);
    else {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (X86Operand *Op = ParseOperand())
        Operands.push_back(Op);
      else {
        Parser.eatToEndOfStatement();
        return true;
      }
    }

    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      SMLoc Loc = getLexer().getLoc();
      Parser.eatToEndOfStatement();
      return Error(Loc, "unexpected token in argument list");
    }
  }

  if (getLexer().is(AsmToken::EndOfStatement))
    Parser.Lex(); // Consume the EndOfStatement
  else if (isPrefix && getLexer().is(AsmToken::Slash))
    Parser.Lex(); // Consume the prefix separator Slash

  if (ExtraImmOp && isParsingIntelSyntax())
    Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));

  // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
  // "outb %al, %dx".
  // Out doesn't take a memory form, but this is a widely
  // documented form in various unofficial manuals, so a lot of code uses it.
  if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
      Operands.size() == 3) {
    X86Operand &Op = *(X86Operand*)Operands.back();
    // Only rewrite a bare zero-displacement (%dx) memory operand.
    if (Op.isMem() && Op.Mem.SegReg == 0 &&
        isa<MCConstantExpr>(Op.Mem.Disp) &&
        cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
        Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
      SMLoc Loc = Op.getEndLoc();
      Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
      delete &Op;
    }
  }
  // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
  if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
      Operands.size() == 3) {
    X86Operand &Op = *(X86Operand*)Operands.begin()[1];
    if (Op.isMem() && Op.Mem.SegReg == 0 &&
        isa<MCConstantExpr>(Op.Mem.Disp) &&
        cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
        Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
      SMLoc Loc = Op.getEndLoc();
      Operands.begin()[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
      delete &Op;
    }
  }
  // Transform "ins[bwl] %dx, %es:(%edi)" into "ins[bwl]" (implicit operands).
  if (Name.startswith("ins") && Operands.size() == 3 &&
      (Name == "insb" || Name == "insw" || Name == "insl")) {
    X86Operand &Op = *(X86Operand*)Operands.begin()[1];
    X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
    if (Op.isReg() && Op.getReg() == X86::DX && isDstOp(Op2)) {
      Operands.pop_back();
      Operands.pop_back();
      delete &Op;
      delete &Op2;
    }
  }

  // Transform "outs[bwl] %ds:(%esi), %dx" into "outs[bwl]"
  if (Name.startswith("outs") && Operands.size() == 3 &&
      (Name == "outsb" || Name == "outsw" || Name == "outsl")) {
    X86Operand &Op = *(X86Operand*)Operands.begin()[1];
    X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
    if (isSrcOp(Op) && Op2.isReg() && Op2.getReg() == X86::DX) {
      Operands.pop_back();
      Operands.pop_back();
      delete &Op;
      delete &Op2;
    }
  }

  // Transform "movs[bwl] %ds:(%esi), %es:(%edi)" into "movs[bwl]"
  if (Name.startswith("movs") && Operands.size() == 3 &&
      (Name == "movsb" || Name == "movsw" || Name == "movsl" ||
       (is64BitMode() && Name == "movsq"))) {
    X86Operand &Op = *(X86Operand*)Operands.begin()[1];
    X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
    if (isSrcOp(Op) && isDstOp(Op2)) {
      Operands.pop_back();
      Operands.pop_back();
      delete &Op;
      delete &Op2;
    }
  }
  // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]".
  // For the bare "lods" form, the destination register selects the suffix.
  if (Name.startswith("lods") && Operands.size() == 3 &&
      (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
       Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) {
    X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
    X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
    if (isSrcOp(*Op1) && Op2->isReg()) {
      const char *ins;
      unsigned reg = Op2->getReg();
      bool isLods = Name == "lods";
      if (reg == X86::AL && (isLods || Name == "lodsb"))
        ins = "lodsb";
      else if (reg == X86::AX && (isLods || Name == "lodsw"))
        ins = "lodsw";
      else if (reg == X86::EAX && (isLods || Name == "lodsl"))
        ins = "lodsl";
      else if (reg == X86::RAX && (isLods || Name == "lodsq"))
        ins = "lodsq";
      else
        ins = NULL;
      if (ins != NULL) {
        Operands.pop_back();
        Operands.pop_back();
        delete Op1;
        delete Op2;
        if (Name != ins)
          static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
      }
    }
  }
  // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]".
  // Mirrors the lods transform above, with the register as the source.
  if (Name.startswith("stos") && Operands.size() == 3 &&
      (Name == "stos" || Name == "stosb" || Name == "stosw" ||
       Name == "stosl" || (is64BitMode() && Name == "stosq"))) {
    X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
    X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
    if (isDstOp(*Op2) && Op1->isReg()) {
      const char *ins;
      unsigned reg = Op1->getReg();
      bool isStos = Name == "stos";
      if (reg == X86::AL && (isStos || Name == "stosb"))
        ins = "stosb";
      else if (reg == X86::AX && (isStos || Name == "stosw"))
        ins = "stosw";
      else if (reg == X86::EAX && (isStos || Name == "stosl"))
        ins = "stosl";
      else if (reg == X86::RAX && (isStos || Name == "stosq"))
        ins = "stosq";
      else
        ins = NULL;
      if (ins != NULL) {
        Operands.pop_back();
        Operands.pop_back();
        delete Op1;
        delete Op2;
        if (Name != ins)
          static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
      }
    }
  }

  // FIXME: Hack to recognize s{hr,ar,hl} $1, <op>.  Canonicalize to
  // "shift <op>" (the implicit-1 form), dropping the immediate.
  if ((Name.startswith("shr") || Name.startswith("sar") ||
       Name.startswith("shl") || Name.startswith("sal") ||
       Name.startswith("rcl") || Name.startswith("rcr") ||
       Name.startswith("rol") || Name.startswith("ror")) &&
      Operands.size() == 3) {
    if (isParsingIntelSyntax()) {
      // Intel syntax: the immediate is the last operand.
      X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
      if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
          cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
        delete Operands[2];
        Operands.pop_back();
      }
    } else {
      // AT&T syntax: the immediate is the first operand.
      X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
      if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
          cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
        delete Operands[1];
        Operands.erase(Operands.begin() + 1);
      }
    }
  }

  // Transforms "int $3" into "int3" as a size optimization.  We can't write an
  // instalias with an immediate operand yet.
  if (Name == "int" && Operands.size() == 2) {
    X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
    if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
        cast<MCConstantExpr>(Op1->getImm())->getValue() == 3) {
      delete Operands[1];
      Operands.erase(Operands.begin() + 1);
      static_cast<X86Operand*>(Operands[0])->setTokenValue("int3");
    }
  }

  return false;
}

// Rewrite Inst in place to the given sign-extended-imm8 opcode: the fixed
// accumulator register Reg becomes the def and use (use only for compares,
// which have no register def), followed by the original immediate.
static bool convertToSExti8(MCInst &Inst, unsigned Opcode, unsigned Reg,
                            bool isCmp) {
  MCInst TmpInst;
  TmpInst.setOpcode(Opcode);
  if (!isCmp)
    TmpInst.addOperand(MCOperand::CreateReg(Reg));
  TmpInst.addOperand(MCOperand::CreateReg(Reg));
  TmpInst.addOperand(Inst.getOperand(0));
  Inst = TmpInst;
  return true;
}

// Shrink an op-AX-imm16 instruction to the ri8 form when the immediate fits
// in a sign-extended 8-bit value.  Returns true if Inst was rewritten.
static bool convert16i16to16ri8(MCInst &Inst, unsigned Opcode,
                                bool isCmp = false) {
  if (!Inst.getOperand(0).isImm() ||
      !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
    return false;

  return convertToSExti8(Inst, Opcode, X86::AX, isCmp);
}

// As above, for the EAX/imm32 forms.
static bool convert32i32to32ri8(MCInst &Inst, unsigned Opcode,
                                bool isCmp = false) {
  if (!Inst.getOperand(0).isImm() ||
      !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
    return false;

  return convertToSExti8(Inst, Opcode, X86::EAX, isCmp);
}

// As above, for the RAX/imm32 forms.
static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
                                bool isCmp = false) {
  if (!Inst.getOperand(0).isImm() ||
      !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
    return false;

  return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
}

// Post-match tweaks: shrink accumulator-form ALU instructions with small
// immediates to their sign-extended imm8 encodings.  Returns true if Inst
// changed (the caller loops until no further changes occur).
bool X86AsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Ops) {
  switch (Inst.getOpcode()) {
  default: return false;
  case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8);
  case X86::AND32i32: return convert32i32to32ri8(Inst, X86::AND32ri8);
  case X86::AND64i32: return convert64i32to64ri8(Inst, X86::AND64ri8);
  case X86::XOR16i16: return convert16i16to16ri8(Inst, X86::XOR16ri8);
  case X86::XOR32i32: return convert32i32to32ri8(Inst, X86::XOR32ri8);
  case X86::XOR64i32: return convert64i32to64ri8(Inst, X86::XOR64ri8);
  case X86::OR16i16:  return convert16i16to16ri8(Inst, X86::OR16ri8);
  case X86::OR32i32:  return convert32i32to32ri8(Inst, X86::OR32ri8);
  case X86::OR64i32:  return convert64i32to64ri8(Inst, X86::OR64ri8);
  case X86::CMP16i16: return convert16i16to16ri8(Inst, X86::CMP16ri8, true);
  case X86::CMP32i32: return convert32i32to32ri8(Inst, X86::CMP32ri8, true);
  case X86::CMP64i32: return convert64i32to64ri8(Inst, X86::CMP64ri8, true);
  case X86::ADD16i16: return convert16i16to16ri8(Inst, X86::ADD16ri8);
  case X86::ADD32i32: return convert32i32to32ri8(Inst, X86::ADD32ri8);
  case X86::ADD64i32: return convert64i32to64ri8(Inst, X86::ADD64ri8);
  case X86::SUB16i16: return convert16i16to16ri8(Inst, X86::SUB16ri8);
  case X86::SUB32i32: return convert32i32to32ri8(Inst, X86::SUB32ri8);
  case X86::SUB64i32: return convert64i32to64ri8(Inst, X86::SUB64ri8);
  case X86::ADC16i16: return convert16i16to16ri8(Inst, X86::ADC16ri8);
  case X86::ADC32i32: return convert32i32to32ri8(Inst, X86::ADC32ri8);
  case X86::ADC64i32: return convert64i32to64ri8(Inst, X86::ADC64ri8);
  case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
  case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
  case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
  }
}

// Defined by the tablegen-generated matcher included below.
static const char *getSubtargetFeatureName(unsigned Val);
bool X86AsmParser::
MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out, unsigned &ErrorInfo,
                        bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
X86Operand *Op = static_cast<X86Operand*>(Operands[0]); 1821 assert(Op->isToken() && "Leading operand should always be a mnemonic!"); 1822 ArrayRef<SMRange> EmptyRanges = ArrayRef<SMRange>(); 1823 1824 // First, handle aliases that expand to multiple instructions. 1825 // FIXME: This should be replaced with a real .td file alias mechanism. 1826 // Also, MatchInstructionImpl should actually *do* the EmitInstruction 1827 // call. 1828 if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" || 1829 Op->getToken() == "fstsww" || Op->getToken() == "fstcww" || 1830 Op->getToken() == "finit" || Op->getToken() == "fsave" || 1831 Op->getToken() == "fstenv" || Op->getToken() == "fclex") { 1832 MCInst Inst; 1833 Inst.setOpcode(X86::WAIT); 1834 Inst.setLoc(IDLoc); 1835 if (!MatchingInlineAsm) 1836 Out.EmitInstruction(Inst); 1837 1838 const char *Repl = 1839 StringSwitch<const char*>(Op->getToken()) 1840 .Case("finit", "fninit") 1841 .Case("fsave", "fnsave") 1842 .Case("fstcw", "fnstcw") 1843 .Case("fstcww", "fnstcw") 1844 .Case("fstenv", "fnstenv") 1845 .Case("fstsw", "fnstsw") 1846 .Case("fstsww", "fnstsw") 1847 .Case("fclex", "fnclex") 1848 .Default(0); 1849 assert(Repl && "Unknown wait-prefixed instruction"); 1850 delete Operands[0]; 1851 Operands[0] = X86Operand::CreateToken(Repl, IDLoc); 1852 } 1853 1854 bool WasOriginallyInvalidOperand = false; 1855 MCInst Inst; 1856 1857 // First, try a direct match. 1858 switch (MatchInstructionImpl(Operands, Inst, 1859 ErrorInfo, MatchingInlineAsm, 1860 isParsingIntelSyntax())) { 1861 default: break; 1862 case Match_Success: 1863 // Some instructions need post-processing to, for example, tweak which 1864 // encoding is selected. Loop on it while changes happen so the 1865 // individual transformations can chain off each other. 
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected.  Loop while processInstruction keeps reporting
    // changes so individual transformations can chain off each other.  This
    // (and emission below) is skipped when only match-checking for inline asm.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;

    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      Out.EmitInstruction(Inst);
    Opcode = Inst.getOpcode();
    return false;
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing.
    std::string Msg = "instruction requires:";
    unsigned Mask = 1;
    // Each set bit in ErrorInfo identifies one missing subtarget feature;
    // append its name for every bit that is set.
    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += " ";
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
      }
      Mask <<= 1;
    }
    return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
  }
  case Match_InvalidOperand:
    // Remember this so the mnemonic-fail path below can distinguish a bad
    // mnemonic from a bad operand on the original (unsuffixed) match.
    WasOriginallyInvalidOperand = true;
    break;
  case Match_MnemonicFail:
    break;
  }

  // FIXME: Ideally, we would only attempt suffix matches for things which are
  // valid prefixes, and we could just infer the right unambiguous
  // type. However, that requires substantially more matcher support than the
  // following hack.

  // Change the operand to point to a temporary token.  Tmp holds the mnemonic
  // plus one extra slot (the trailing space) that each candidate suffix is
  // written into below.
  StringRef Base = Op->getToken();
  SmallString<16> Tmp;
  Tmp += Base;
  Tmp += ' ';
  Op->setTokenValue(Tmp.str());

  // If this instruction starts with an 'f', then it is a floating point stack
  // instruction.  These come in up to three forms for 32-bit, 64-bit, and
  // 80-bit floating point, which use the suffixes s,l,t respectively.
  //
  // Otherwise, we assume that this may be an integer instruction, which comes
  // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
  const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";

  // Check for the various suffix matches.  Each attempt overwrites the suffix
  // slot in Tmp and re-runs the matcher.
  Tmp[Base.size()] = Suffixes[0];
  unsigned ErrorInfoIgnore;
  unsigned ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
  unsigned Match1, Match2, Match3, Match4;

  Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
                                isParsingIntelSyntax());
  // If this returned as a missing feature failure, remember that.
  if (Match1 == Match_MissingFeature)
    ErrorInfoMissingFeature = ErrorInfoIgnore;
  Tmp[Base.size()] = Suffixes[1];
  Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
                                isParsingIntelSyntax());
  // If this returned as a missing feature failure, remember that.
  if (Match2 == Match_MissingFeature)
    ErrorInfoMissingFeature = ErrorInfoIgnore;
  Tmp[Base.size()] = Suffixes[2];
  Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
                                isParsingIntelSyntax());
  // If this returned as a missing feature failure, remember that.
  if (Match3 == Match_MissingFeature)
    ErrorInfoMissingFeature = ErrorInfoIgnore;
  Tmp[Base.size()] = Suffixes[3];
  Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
                                isParsingIntelSyntax());
  // If this returned as a missing feature failure, remember that.
  if (Match4 == Match_MissingFeature)
    ErrorInfoMissingFeature = ErrorInfoIgnore;

  // Restore the old token.
  Op->setTokenValue(Base);

  // If exactly one matched, then we treat that as a successful match (and the
  // instruction will already have been filled in correctly, since the failing
  // matches won't have modified it).
  unsigned NumSuccessfulMatches =
    (Match1 == Match_Success) + (Match2 == Match_Success) +
    (Match3 == Match_Success) + (Match4 == Match_Success);
  if (NumSuccessfulMatches == 1) {
    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      Out.EmitInstruction(Inst);
    Opcode = Inst.getOpcode();
    return false;
  }

  // Otherwise, the match failed, try to produce a decent error message.

  // If we had multiple suffix matches, then identify this as an ambiguous
  // match.
  if (NumSuccessfulMatches > 1) {
    char MatchChars[4];
    unsigned NumMatches = 0;
    if (Match1 == Match_Success) MatchChars[NumMatches++] = Suffixes[0];
    if (Match2 == Match_Success) MatchChars[NumMatches++] = Suffixes[1];
    if (Match3 == Match_Success) MatchChars[NumMatches++] = Suffixes[2];
    if (Match4 == Match_Success) MatchChars[NumMatches++] = Suffixes[3];

    SmallString<126> Msg;
    raw_svector_ostream OS(Msg);
    OS << "ambiguous instructions require an explicit suffix (could be ";
    // Build a comma-separated list ending with "or '<base><suffix>'".
    for (unsigned i = 0; i != NumMatches; ++i) {
      if (i != 0)
        OS << ", ";
      if (i + 1 == NumMatches)
        OS << "or ";
      OS << "'" << Base << MatchChars[i] << "'";
    }
    OS << ")";
    Error(IDLoc, OS.str(), EmptyRanges, MatchingInlineAsm);
    return true;
  }

  // Okay, we know that none of the variants matched successfully.

  // If all of the instructions reported an invalid mnemonic, then the original
  // mnemonic was invalid.
  if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
      (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
    if (!WasOriginallyInvalidOperand) {
      // Highlight the mnemonic's own range, except for inline asm where no
      // source ranges are reported.
      ArrayRef<SMRange> Ranges = MatchingInlineAsm ? EmptyRanges :
        Op->getLocRange();
      return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
                   Ranges, MatchingInlineAsm);
    }

    // Recover location info for the operand if we know which was the problem.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction",
                     EmptyRanges, MatchingInlineAsm);

      X86Operand *Operand = (X86Operand*)Operands[ErrorInfo];
      if (Operand->getStartLoc().isValid()) {
        SMRange OperandRange = Operand->getLocRange();
        return Error(Operand->getStartLoc(), "invalid operand for instruction",
                     OperandRange, MatchingInlineAsm);
      }
    }

    return Error(IDLoc, "invalid operand for instruction", EmptyRanges,
                 MatchingInlineAsm);
  }

  // If one instruction matched with a missing feature, report this as a
  // missing feature.
  if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) +
      (Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){
    std::string Msg = "instruction requires:";
    unsigned Mask = 1;
    // Same bit-walk as the Match_MissingFeature case above, but over the
    // feature info saved from the suffixed re-matches.
    for (unsigned i = 0; i < (sizeof(ErrorInfoMissingFeature)*8-1); ++i) {
      if (ErrorInfoMissingFeature & Mask) {
        Msg += " ";
        Msg += getSubtargetFeatureName(ErrorInfoMissingFeature & Mask);
      }
      Mask <<= 1;
    }
    return Error(IDLoc, Msg, EmptyRanges, MatchingInlineAsm);
  }

  // If one instruction matched with an invalid operand, report this as an
  // operand failure.
  if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) +
      (Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){
    Error(IDLoc, "invalid operand for instruction", EmptyRanges,
          MatchingInlineAsm);
    return true;
  }

  // If all of these were an outright failure, report it in a useless way.
  Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
        EmptyRanges, MatchingInlineAsm);
  return true;
}


/// ParseDirective - Handle the target-specific directives this parser
/// understands: .word, .code*, .att_syntax, and .intel_syntax.  Returns
/// false when the directive was consumed successfully, true when it is
/// unrecognized or malformed.
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal.startswith(".code"))
    return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
  else if (IDVal.startswith(".att_syntax")) {
    // Dialect 0 is AT&T syntax.
    getParser().setAssemblerDialect(0);
    return false;
  } else if (IDVal.startswith(".intel_syntax")) {
    // Dialect 1 is Intel syntax.
    getParser().setAssemblerDialect(1);
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      if(Parser.getTok().getString() == "noprefix") {
        // FIXME : Handle noprefix
        Parser.Lex();
      } else
        // Any other trailing token after ".intel_syntax" is an error
        // (reported as an unrecognized directive by the caller).
        return true;
    }
    return false;
  }
  return true;
}

/// ParseDirectiveWord
///  ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a Size-byte value; an empty
/// operand list is accepted and emits nothing.
bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      // Expressions must be separated by commas; note the error points at
      // the directive location L rather than the offending token.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  // Consume the EndOfStatement token.
  Parser.Lex();
  return false;
}

/// ParseDirectiveCode
///  ::= .code32 | .code64
/// Switches the parser between 32-bit and 64-bit mode, toggling the
/// subtarget feature bits and notifying the streamer, but only when the
/// requested mode differs from the current one.
bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
  if (IDVal == ".code32") {
    Parser.Lex();
    if (is64BitMode()) {
      SwitchMode();
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
    }
  } else if (IDVal == ".code64") {
    Parser.Lex();
    if (!is64BitMode()) {
      SwitchMode();
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
    }
  } else {
    return Error(L, "unexpected directive " + IDVal);
  }

  return false;
}

// Force static initialization: register this parser for both the 32-bit and
// 64-bit x86 targets.
extern "C" void LLVMInitializeX86AsmParser() {
  RegisterMCAsmParser<X86AsmParser> X(TheX86_32Target);
  RegisterMCAsmParser<X86AsmParser> Y(TheX86_64Target);
}

// Pull in the tablegen-generated register matcher, instruction matcher, and
// subtarget feature name table.
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#define GET_SUBTARGET_FEATURE_NAME
#include "X86GenAsmMatcher.inc"