//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target assembly parser for ARM/Thumb.  Parses mnemonics
/// and operands into MCInsts, and tracks the state of any Thumb IT
/// (If-Then) block spanning the instructions currently being parsed.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // State of the IT block we are currently inside of, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block.  It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    // 5 - TZ is one past the last instruction slot implied by the mask.
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level operand and addressing-form parsing helpers.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Assembler directive handlers (.word, .thumb, .thumb_func, .code, .syntax).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Flip between ARM and Thumb mode and recompute the feature set.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked by the auto-generated matcher above.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match result codes, continuing the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below; exactly one member is live at a time.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  // Kept outside the union below (non-trivial constructor); valid for the
  // three register-list kinds.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor: only the union member selected by Kind is valid, so
  // copy exactly that member (plus Registers for the list kinds).
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors; each asserts that the matching union member is
  // the live one.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Operand-class predicates queried by the auto-generated matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm()
  const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  // Immediate-range predicates.  Each requires the operand to be a
  // compile-time constant (non-constant expressions fail, except where
  // noted) and checks the encodable range for the corresponding operand
  // class; "sN" suffixes mean the value must be a multiple of N.
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Multiple of 4 in [-1020, 1020].
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Thumb shift-right amount: [1, 32].
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isARMSOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Valid iff expressible as an 8-bit value rotated by an even amount.
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isT2SOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  // Addressing-mode predicates: classify a parsed memory operand (or
  // stand-alone offset) against the constraints of each ARM/Thumb
  // addressing mode.
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes #-0.
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 0;
  }
  bool isMemUImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
881 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 882 return true; 883 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [-4095, 4095]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 890 } 891 bool isPostIdxImm8() const { 892 if (Kind != k_Immediate) 893 return false; 894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 895 if (!CE) return false; 896 int64_t Val = CE->getValue(); 897 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 898 } 899 bool isPostIdxImm8s4() const { 900 if (Kind != k_Immediate) 901 return false; 902 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 903 if (!CE) return false; 904 int64_t Val = CE->getValue(); 905 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 906 (Val == INT32_MIN); 907 } 908 909 bool isMSRMask() const { return Kind == k_MSRMask; } 910 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 911 912 // NEON operands. 913 bool isVecListOneD() const { 914 if (Kind != k_VectorList) return false; 915 return VectorList.Count == 1; 916 } 917 918 bool isVectorIndex8() const { 919 if (Kind != k_VectorIndex) return false; 920 return VectorIndex.Val < 8; 921 } 922 bool isVectorIndex16() const { 923 if (Kind != k_VectorIndex) return false; 924 return VectorIndex.Val < 4; 925 } 926 bool isVectorIndex32() const { 927 if (Kind != k_VectorIndex) return false; 928 return VectorIndex.Val < 2; 929 } 930 931 bool isNEONi8splat() const { 932 if (Kind != k_Immediate) 933 return false; 934 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 935 // Must be a constant. 936 if (!CE) return false; 937 int64_t Value = CE->getValue(); 938 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 939 // value. 
940 return Value >= 0 && Value < 256; 941 } 942 943 bool isNEONi16splat() const { 944 if (Kind != k_Immediate) 945 return false; 946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 947 // Must be a constant. 948 if (!CE) return false; 949 int64_t Value = CE->getValue(); 950 // i16 value in the range [0,255] or [0x0100, 0xff00] 951 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 952 } 953 954 bool isNEONi32splat() const { 955 if (Kind != k_Immediate) 956 return false; 957 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 958 // Must be a constant. 959 if (!CE) return false; 960 int64_t Value = CE->getValue(); 961 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 962 return (Value >= 0 && Value < 256) || 963 (Value >= 0x0100 && Value <= 0xff00) || 964 (Value >= 0x010000 && Value <= 0xff0000) || 965 (Value >= 0x01000000 && Value <= 0xff000000); 966 } 967 968 bool isNEONi32vmov() const { 969 if (Kind != k_Immediate) 970 return false; 971 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 972 // Must be a constant. 973 if (!CE) return false; 974 int64_t Value = CE->getValue(); 975 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 976 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 977 return (Value >= 0 && Value < 256) || 978 (Value >= 0x0100 && Value <= 0xff00) || 979 (Value >= 0x010000 && Value <= 0xff0000) || 980 (Value >= 0x01000000 && Value <= 0xff000000) || 981 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 982 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 983 } 984 985 bool isNEONi64splat() const { 986 if (Kind != k_Immediate) 987 return false; 988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 989 // Must be a constant. 990 if (!CE) return false; 991 uint64_t Value = CE->getValue(); 992 // i64 value with each byte being either 0 or 0xff. 
993 for (unsigned i = 0; i < 8; ++i) 994 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 995 return true; 996 } 997 998 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 999 // Add as immediates when possible. Null MCExpr = 0. 1000 if (Expr == 0) 1001 Inst.addOperand(MCOperand::CreateImm(0)); 1002 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1003 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1004 else 1005 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1006 } 1007 1008 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1009 assert(N == 2 && "Invalid number of operands!"); 1010 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1011 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; 1012 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1013 } 1014 1015 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1016 assert(N == 1 && "Invalid number of operands!"); 1017 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1018 } 1019 1020 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1021 assert(N == 1 && "Invalid number of operands!"); 1022 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1023 } 1024 1025 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1026 assert(N == 1 && "Invalid number of operands!"); 1027 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1028 } 1029 1030 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1031 assert(N == 1 && "Invalid number of operands!"); 1032 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1033 } 1034 1035 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1036 assert(N == 1 && "Invalid number of operands!"); 1037 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1038 } 1039 1040 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1041 assert(N == 1 && "Invalid number of operands!"); 1042 Inst.addOperand(MCOperand::CreateReg(getReg())); 1043 } 1044 1045 void 
addRegOperands(MCInst &Inst, unsigned N) const { 1046 assert(N == 1 && "Invalid number of operands!"); 1047 Inst.addOperand(MCOperand::CreateReg(getReg())); 1048 } 1049 1050 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1051 assert(N == 3 && "Invalid number of operands!"); 1052 assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!"); 1053 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1054 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1055 Inst.addOperand(MCOperand::CreateImm( 1056 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1057 } 1058 1059 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1060 assert(N == 2 && "Invalid number of operands!"); 1061 assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!"); 1062 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1063 Inst.addOperand(MCOperand::CreateImm( 1064 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1065 } 1066 1067 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1068 assert(N == 1 && "Invalid number of operands!"); 1069 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1070 ShifterImm.Imm)); 1071 } 1072 1073 void addRegListOperands(MCInst &Inst, unsigned N) const { 1074 assert(N == 1 && "Invalid number of operands!"); 1075 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1076 for (SmallVectorImpl<unsigned>::const_iterator 1077 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1078 Inst.addOperand(MCOperand::CreateReg(*I)); 1079 } 1080 1081 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1082 addRegListOperands(Inst, N); 1083 } 1084 1085 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1086 addRegListOperands(Inst, N); 1087 } 1088 1089 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1090 assert(N == 1 && "Invalid number of operands!"); 1091 // Encoded as val>>3. 
The printer handles display as 8, 16, 24. 1092 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1093 } 1094 1095 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1096 assert(N == 1 && "Invalid number of operands!"); 1097 // Munge the lsb/width into a bitfield mask. 1098 unsigned lsb = Bitfield.LSB; 1099 unsigned width = Bitfield.Width; 1100 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1101 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1102 (32 - (lsb + width))); 1103 Inst.addOperand(MCOperand::CreateImm(Mask)); 1104 } 1105 1106 void addImmOperands(MCInst &Inst, unsigned N) const { 1107 assert(N == 1 && "Invalid number of operands!"); 1108 addExpr(Inst, getImm()); 1109 } 1110 1111 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1112 assert(N == 1 && "Invalid number of operands!"); 1113 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1114 } 1115 1116 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1117 assert(N == 1 && "Invalid number of operands!"); 1118 // FIXME: We really want to scale the value here, but the LDRD/STRD 1119 // instruction don't encode operands that way yet. 1120 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1121 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1122 } 1123 1124 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1125 assert(N == 1 && "Invalid number of operands!"); 1126 // The immediate is scaled by four in the encoding and is stored 1127 // in the MCInst as such. Lop off the low two bits here. 1128 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1129 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1130 } 1131 1132 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1133 assert(N == 1 && "Invalid number of operands!"); 1134 // The immediate is scaled by four in the encoding and is stored 1135 // in the MCInst as such. Lop off the low two bits here. 
1136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1137 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1138 } 1139 1140 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1141 assert(N == 1 && "Invalid number of operands!"); 1142 addExpr(Inst, getImm()); 1143 } 1144 1145 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1146 assert(N == 1 && "Invalid number of operands!"); 1147 addExpr(Inst, getImm()); 1148 } 1149 1150 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1151 assert(N == 1 && "Invalid number of operands!"); 1152 addExpr(Inst, getImm()); 1153 } 1154 1155 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1156 assert(N == 1 && "Invalid number of operands!"); 1157 addExpr(Inst, getImm()); 1158 } 1159 1160 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1161 assert(N == 1 && "Invalid number of operands!"); 1162 // The constant encodes as the immediate-1, and we store in the instruction 1163 // the bits as encoded, so subtract off one here. 1164 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1165 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1166 } 1167 1168 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1169 assert(N == 1 && "Invalid number of operands!"); 1170 // The constant encodes as the immediate-1, and we store in the instruction 1171 // the bits as encoded, so subtract off one here. 
1172 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1173 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1174 } 1175 1176 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1177 assert(N == 1 && "Invalid number of operands!"); 1178 addExpr(Inst, getImm()); 1179 } 1180 1181 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1182 assert(N == 1 && "Invalid number of operands!"); 1183 addExpr(Inst, getImm()); 1184 } 1185 1186 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1187 assert(N == 1 && "Invalid number of operands!"); 1188 addExpr(Inst, getImm()); 1189 } 1190 1191 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1192 assert(N == 1 && "Invalid number of operands!"); 1193 // The constant encodes as the immediate, except for 32, which encodes as 1194 // zero. 1195 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1196 unsigned Imm = CE->getValue(); 1197 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1198 } 1199 1200 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1201 assert(N == 1 && "Invalid number of operands!"); 1202 addExpr(Inst, getImm()); 1203 } 1204 1205 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1206 assert(N == 1 && "Invalid number of operands!"); 1207 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1208 // the instruction as well. 1209 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1210 int Val = CE->getValue(); 1211 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 
0 : Val)); 1212 } 1213 1214 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1215 assert(N == 1 && "Invalid number of operands!"); 1216 addExpr(Inst, getImm()); 1217 } 1218 1219 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1220 assert(N == 1 && "Invalid number of operands!"); 1221 addExpr(Inst, getImm()); 1222 } 1223 1224 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1225 assert(N == 1 && "Invalid number of operands!"); 1226 addExpr(Inst, getImm()); 1227 } 1228 1229 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1230 assert(N == 1 && "Invalid number of operands!"); 1231 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1232 } 1233 1234 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1235 assert(N == 1 && "Invalid number of operands!"); 1236 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1237 } 1238 1239 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1240 assert(N == 2 && "Invalid number of operands!"); 1241 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1242 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1243 } 1244 1245 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1246 assert(N == 3 && "Invalid number of operands!"); 1247 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1248 if (!Memory.OffsetRegNum) { 1249 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1250 // Special case for #-0 1251 if (Val == INT32_MIN) Val = 0; 1252 if (Val < 0) Val = -Val; 1253 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1254 } else { 1255 // For register offset, we encode the shift type and negation flag 1256 // here. 1257 Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1258 Memory.ShiftImm, Memory.ShiftType); 1259 } 1260 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1261 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1262 Inst.addOperand(MCOperand::CreateImm(Val)); 1263 } 1264 1265 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1266 assert(N == 2 && "Invalid number of operands!"); 1267 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1268 assert(CE && "non-constant AM2OffsetImm operand!"); 1269 int32_t Val = CE->getValue(); 1270 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1271 // Special case for #-0 1272 if (Val == INT32_MIN) Val = 0; 1273 if (Val < 0) Val = -Val; 1274 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1275 Inst.addOperand(MCOperand::CreateReg(0)); 1276 Inst.addOperand(MCOperand::CreateImm(Val)); 1277 } 1278 1279 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1280 assert(N == 3 && "Invalid number of operands!"); 1281 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1282 if (!Memory.OffsetRegNum) { 1283 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1284 // Special case for #-0 1285 if (Val == INT32_MIN) Val = 0; 1286 if (Val < 0) Val = -Val; 1287 Val = ARM_AM::getAM3Opc(AddSub, Val); 1288 } else { 1289 // For register offset, we encode the shift type and negation flag 1290 // here. 1291 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1292 } 1293 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1294 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1295 Inst.addOperand(MCOperand::CreateImm(Val)); 1296 } 1297 1298 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1299 assert(N == 2 && "Invalid number of operands!"); 1300 if (Kind == k_PostIndexRegister) { 1301 int32_t Val = 1302 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? 
ARM_AM::add : ARM_AM::sub, 0); 1303 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1304 Inst.addOperand(MCOperand::CreateImm(Val)); 1305 return; 1306 } 1307 1308 // Constant offset. 1309 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1310 int32_t Val = CE->getValue(); 1311 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1312 // Special case for #-0 1313 if (Val == INT32_MIN) Val = 0; 1314 if (Val < 0) Val = -Val; 1315 Val = ARM_AM::getAM3Opc(AddSub, Val); 1316 Inst.addOperand(MCOperand::CreateReg(0)); 1317 Inst.addOperand(MCOperand::CreateImm(Val)); 1318 } 1319 1320 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1321 assert(N == 2 && "Invalid number of operands!"); 1322 // The lower two bits are always zero and as such are not encoded. 1323 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1324 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1325 // Special case for #-0 1326 if (Val == INT32_MIN) Val = 0; 1327 if (Val < 0) Val = -Val; 1328 Val = ARM_AM::getAM5Opc(AddSub, Val); 1329 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1330 Inst.addOperand(MCOperand::CreateImm(Val)); 1331 } 1332 1333 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1334 assert(N == 2 && "Invalid number of operands!"); 1335 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1336 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1337 Inst.addOperand(MCOperand::CreateImm(Val)); 1338 } 1339 1340 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1341 assert(N == 2 && "Invalid number of operands!"); 1342 // The lower two bits are always zero and as such are not encoded. 1343 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1344 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1345 Inst.addOperand(MCOperand::CreateImm(Val)); 1346 } 1347 1348 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1349 assert(N == 2 && "Invalid number of operands!"); 1350 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1351 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1352 Inst.addOperand(MCOperand::CreateImm(Val)); 1353 } 1354 1355 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1356 addMemImm8OffsetOperands(Inst, N); 1357 } 1358 1359 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1360 addMemImm8OffsetOperands(Inst, N); 1361 } 1362 1363 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1364 assert(N == 2 && "Invalid number of operands!"); 1365 // If this is an immediate, it's a label reference. 1366 if (Kind == k_Immediate) { 1367 addExpr(Inst, getImm()); 1368 Inst.addOperand(MCOperand::CreateImm(0)); 1369 return; 1370 } 1371 1372 // Otherwise, it's a normal memory reg+offset. 1373 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1374 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1375 Inst.addOperand(MCOperand::CreateImm(Val)); 1376 } 1377 1378 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1379 assert(N == 2 && "Invalid number of operands!"); 1380 // If this is an immediate, it's a label reference. 1381 if (Kind == k_Immediate) { 1382 addExpr(Inst, getImm()); 1383 Inst.addOperand(MCOperand::CreateImm(0)); 1384 return; 1385 } 1386 1387 // Otherwise, it's a normal memory reg+offset. 1388 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1389 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1390 Inst.addOperand(MCOperand::CreateImm(Val)); 1391 } 1392 1393 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1394 assert(N == 2 && "Invalid number of operands!"); 1395 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1396 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1397 } 1398 1399 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1400 assert(N == 2 && "Invalid number of operands!"); 1401 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1402 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1403 } 1404 1405 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1406 assert(N == 3 && "Invalid number of operands!"); 1407 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1408 Memory.ShiftImm, Memory.ShiftType); 1409 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1410 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1411 Inst.addOperand(MCOperand::CreateImm(Val)); 1412 } 1413 1414 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1415 assert(N == 3 && "Invalid number of operands!"); 1416 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1417 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1418 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1419 } 1420 1421 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1422 assert(N == 2 && "Invalid number of operands!"); 1423 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1424 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1425 } 1426 1427 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1428 assert(N == 2 && "Invalid number of operands!"); 1429 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1430 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1431 Inst.addOperand(MCOperand::CreateImm(Val)); 1432 } 1433 1434 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1435 assert(N == 2 && "Invalid number of operands!"); 1436 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1437 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1438 Inst.addOperand(MCOperand::CreateImm(Val)); 1439 } 1440 1441 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1442 assert(N == 2 && "Invalid number of operands!"); 1443 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1444 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1445 Inst.addOperand(MCOperand::CreateImm(Val)); 1446 } 1447 1448 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1449 assert(N == 2 && "Invalid number of operands!"); 1450 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1451 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1452 Inst.addOperand(MCOperand::CreateImm(Val)); 1453 } 1454 1455 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1456 assert(N == 1 && "Invalid number of operands!"); 1457 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1458 assert(CE && "non-constant post-idx-imm8 operand!"); 1459 int Imm = CE->getValue(); 1460 bool isAdd = Imm >= 0; 1461 if (Imm == INT32_MIN) Imm = 0; 1462 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1463 Inst.addOperand(MCOperand::CreateImm(Imm)); 1464 } 1465 1466 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1467 assert(N == 1 && "Invalid number of operands!"); 1468 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1469 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1470 int Imm = CE->getValue(); 1471 bool isAdd = Imm >= 0; 1472 if (Imm == INT32_MIN) Imm = 0; 1473 // Immediate is scaled by 4. 1474 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1475 Inst.addOperand(MCOperand::CreateImm(Imm)); 1476 } 1477 1478 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1479 assert(N == 2 && "Invalid number of operands!"); 1480 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1481 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1482 } 1483 1484 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1485 assert(N == 2 && "Invalid number of operands!"); 1486 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1487 // The sign, shift type, and shift amount are encoded in a single operand 1488 // using the AM2 encoding helpers. 1489 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1490 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1491 PostIdxReg.ShiftTy); 1492 Inst.addOperand(MCOperand::CreateImm(Imm)); 1493 } 1494 1495 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1496 assert(N == 1 && "Invalid number of operands!"); 1497 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1498 } 1499 1500 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1501 assert(N == 1 && "Invalid number of operands!"); 1502 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1503 } 1504 1505 void addVecListOneDOperands(MCInst &Inst, unsigned N) const { 1506 assert(N == 1 && "Invalid number of operands!"); 1507 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1508 } 1509 1510 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1511 assert(N == 1 && "Invalid number of operands!"); 1512 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1513 } 1514 1515 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1516 assert(N == 1 && "Invalid number of operands!"); 1517 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1518 } 1519 1520 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1521 assert(N == 1 && "Invalid number of operands!"); 
1522 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1523 } 1524 1525 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1526 assert(N == 1 && "Invalid number of operands!"); 1527 // The immediate encodes the type of constant as well as the value. 1528 // Mask in that this is an i8 splat. 1529 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1530 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1531 } 1532 1533 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1534 assert(N == 1 && "Invalid number of operands!"); 1535 // The immediate encodes the type of constant as well as the value. 1536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1537 unsigned Value = CE->getValue(); 1538 if (Value >= 256) 1539 Value = (Value >> 8) | 0xa00; 1540 else 1541 Value |= 0x800; 1542 Inst.addOperand(MCOperand::CreateImm(Value)); 1543 } 1544 1545 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1546 assert(N == 1 && "Invalid number of operands!"); 1547 // The immediate encodes the type of constant as well as the value. 1548 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1549 unsigned Value = CE->getValue(); 1550 if (Value >= 256 && Value <= 0xff00) 1551 Value = (Value >> 8) | 0x200; 1552 else if (Value > 0xffff && Value <= 0xff0000) 1553 Value = (Value >> 16) | 0x400; 1554 else if (Value > 0xffffff) 1555 Value = (Value >> 24) | 0x600; 1556 Inst.addOperand(MCOperand::CreateImm(Value)); 1557 } 1558 1559 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1560 assert(N == 1 && "Invalid number of operands!"); 1561 // The immediate encodes the type of constant as well as the value. 1562 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1563 unsigned Value = CE->getValue(); 1564 if (Value >= 256 && Value <= 0xffff) 1565 Value = (Value >> 8) | ((Value & 0xff) ? 
0xc00 : 0x200); 1566 else if (Value > 0xffff && Value <= 0xffffff) 1567 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1568 else if (Value > 0xffffff) 1569 Value = (Value >> 24) | 0x600; 1570 Inst.addOperand(MCOperand::CreateImm(Value)); 1571 } 1572 1573 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1574 assert(N == 1 && "Invalid number of operands!"); 1575 // The immediate encodes the type of constant as well as the value. 1576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1577 uint64_t Value = CE->getValue(); 1578 unsigned Imm = 0; 1579 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 1580 Imm |= (Value & 1) << i; 1581 } 1582 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 1583 } 1584 1585 virtual void print(raw_ostream &OS) const; 1586 1587 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1588 ARMOperand *Op = new ARMOperand(k_ITCondMask); 1589 Op->ITMask.Mask = Mask; 1590 Op->StartLoc = S; 1591 Op->EndLoc = S; 1592 return Op; 1593 } 1594 1595 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1596 ARMOperand *Op = new ARMOperand(k_CondCode); 1597 Op->CC.Val = CC; 1598 Op->StartLoc = S; 1599 Op->EndLoc = S; 1600 return Op; 1601 } 1602 1603 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1604 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1605 Op->Cop.Val = CopVal; 1606 Op->StartLoc = S; 1607 Op->EndLoc = S; 1608 return Op; 1609 } 1610 1611 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1612 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1613 Op->Cop.Val = CopVal; 1614 Op->StartLoc = S; 1615 Op->EndLoc = S; 1616 return Op; 1617 } 1618 1619 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1620 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1621 Op->Cop.Val = Val; 1622 Op->StartLoc = S; 1623 Op->EndLoc = E; 1624 return Op; 1625 } 1626 1627 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1628 ARMOperand *Op = new 
ARMOperand(k_CCOut); 1629 Op->Reg.RegNum = RegNum; 1630 Op->StartLoc = S; 1631 Op->EndLoc = S; 1632 return Op; 1633 } 1634 1635 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1636 ARMOperand *Op = new ARMOperand(k_Token); 1637 Op->Tok.Data = Str.data(); 1638 Op->Tok.Length = Str.size(); 1639 Op->StartLoc = S; 1640 Op->EndLoc = S; 1641 return Op; 1642 } 1643 1644 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1645 ARMOperand *Op = new ARMOperand(k_Register); 1646 Op->Reg.RegNum = RegNum; 1647 Op->StartLoc = S; 1648 Op->EndLoc = E; 1649 return Op; 1650 } 1651 1652 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, 1653 unsigned SrcReg, 1654 unsigned ShiftReg, 1655 unsigned ShiftImm, 1656 SMLoc S, SMLoc E) { 1657 ARMOperand *Op = new ARMOperand(k_ShiftedRegister); 1658 Op->RegShiftedReg.ShiftTy = ShTy; 1659 Op->RegShiftedReg.SrcReg = SrcReg; 1660 Op->RegShiftedReg.ShiftReg = ShiftReg; 1661 Op->RegShiftedReg.ShiftImm = ShiftImm; 1662 Op->StartLoc = S; 1663 Op->EndLoc = E; 1664 return Op; 1665 } 1666 1667 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, 1668 unsigned SrcReg, 1669 unsigned ShiftImm, 1670 SMLoc S, SMLoc E) { 1671 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate); 1672 Op->RegShiftedImm.ShiftTy = ShTy; 1673 Op->RegShiftedImm.SrcReg = SrcReg; 1674 Op->RegShiftedImm.ShiftImm = ShiftImm; 1675 Op->StartLoc = S; 1676 Op->EndLoc = E; 1677 return Op; 1678 } 1679 1680 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm, 1681 SMLoc S, SMLoc E) { 1682 ARMOperand *Op = new ARMOperand(k_ShifterImmediate); 1683 Op->ShifterImm.isASR = isASR; 1684 Op->ShifterImm.Imm = Imm; 1685 Op->StartLoc = S; 1686 Op->EndLoc = E; 1687 return Op; 1688 } 1689 1690 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) { 1691 ARMOperand *Op = new ARMOperand(k_RotateImmediate); 1692 Op->RotImm.Imm = Imm; 1693 Op->StartLoc = S; 1694 Op->EndLoc = E; 1695 return Op; 1696 } 1697 1698 static ARMOperand 
*CreateBitfield(unsigned LSB, unsigned Width, 1699 SMLoc S, SMLoc E) { 1700 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor); 1701 Op->Bitfield.LSB = LSB; 1702 Op->Bitfield.Width = Width; 1703 Op->StartLoc = S; 1704 Op->EndLoc = E; 1705 return Op; 1706 } 1707 1708 static ARMOperand * 1709 CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs, 1710 SMLoc StartLoc, SMLoc EndLoc) { 1711 KindTy Kind = k_RegisterList; 1712 1713 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first)) 1714 Kind = k_DPRRegisterList; 1715 else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. 1716 contains(Regs.front().first)) 1717 Kind = k_SPRRegisterList; 1718 1719 ARMOperand *Op = new ARMOperand(Kind); 1720 for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator 1721 I = Regs.begin(), E = Regs.end(); I != E; ++I) 1722 Op->Registers.push_back(I->first); 1723 array_pod_sort(Op->Registers.begin(), Op->Registers.end()); 1724 Op->StartLoc = StartLoc; 1725 Op->EndLoc = EndLoc; 1726 return Op; 1727 } 1728 1729 static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count, 1730 SMLoc S, SMLoc E) { 1731 ARMOperand *Op = new ARMOperand(k_VectorList); 1732 Op->VectorList.RegNum = RegNum; 1733 Op->VectorList.Count = Count; 1734 Op->StartLoc = S; 1735 Op->EndLoc = E; 1736 return Op; 1737 } 1738 1739 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, 1740 MCContext &Ctx) { 1741 ARMOperand *Op = new ARMOperand(k_VectorIndex); 1742 Op->VectorIndex.Val = Idx; 1743 Op->StartLoc = S; 1744 Op->EndLoc = E; 1745 return Op; 1746 } 1747 1748 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { 1749 ARMOperand *Op = new ARMOperand(k_Immediate); 1750 Op->Imm.Val = Val; 1751 Op->StartLoc = S; 1752 Op->EndLoc = E; 1753 return Op; 1754 } 1755 1756 static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) { 1757 ARMOperand *Op = new ARMOperand(k_FPImmediate); 1758 Op->FPImm.Val = Val; 1759 Op->StartLoc = S; 1760 
Op->EndLoc = S; 1761 return Op; 1762 } 1763 1764 static ARMOperand *CreateMem(unsigned BaseRegNum, 1765 const MCConstantExpr *OffsetImm, 1766 unsigned OffsetRegNum, 1767 ARM_AM::ShiftOpc ShiftType, 1768 unsigned ShiftImm, 1769 unsigned Alignment, 1770 bool isNegative, 1771 SMLoc S, SMLoc E) { 1772 ARMOperand *Op = new ARMOperand(k_Memory); 1773 Op->Memory.BaseRegNum = BaseRegNum; 1774 Op->Memory.OffsetImm = OffsetImm; 1775 Op->Memory.OffsetRegNum = OffsetRegNum; 1776 Op->Memory.ShiftType = ShiftType; 1777 Op->Memory.ShiftImm = ShiftImm; 1778 Op->Memory.Alignment = Alignment; 1779 Op->Memory.isNegative = isNegative; 1780 Op->StartLoc = S; 1781 Op->EndLoc = E; 1782 return Op; 1783 } 1784 1785 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd, 1786 ARM_AM::ShiftOpc ShiftTy, 1787 unsigned ShiftImm, 1788 SMLoc S, SMLoc E) { 1789 ARMOperand *Op = new ARMOperand(k_PostIndexRegister); 1790 Op->PostIdxReg.RegNum = RegNum; 1791 Op->PostIdxReg.isAdd = isAdd; 1792 Op->PostIdxReg.ShiftTy = ShiftTy; 1793 Op->PostIdxReg.ShiftImm = ShiftImm; 1794 Op->StartLoc = S; 1795 Op->EndLoc = E; 1796 return Op; 1797 } 1798 1799 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) { 1800 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt); 1801 Op->MBOpt.Val = Opt; 1802 Op->StartLoc = S; 1803 Op->EndLoc = S; 1804 return Op; 1805 } 1806 1807 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) { 1808 ARMOperand *Op = new ARMOperand(k_ProcIFlags); 1809 Op->IFlags.Val = IFlags; 1810 Op->StartLoc = S; 1811 Op->EndLoc = S; 1812 return Op; 1813 } 1814 1815 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { 1816 ARMOperand *Op = new ARMOperand(k_MSRMask); 1817 Op->MMask.Val = MMask; 1818 Op->StartLoc = S; 1819 Op->EndLoc = S; 1820 return Op; 1821 } 1822 }; 1823 1824 } // end anonymous namespace. 
1825 1826 void ARMOperand::print(raw_ostream &OS) const { 1827 switch (Kind) { 1828 case k_FPImmediate: 1829 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1830 << ") >"; 1831 break; 1832 case k_CondCode: 1833 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1834 break; 1835 case k_CCOut: 1836 OS << "<ccout " << getReg() << ">"; 1837 break; 1838 case k_ITCondMask: { 1839 static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)", 1840 "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)", 1841 "(tee)", "(eee)" }; 1842 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1843 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1844 break; 1845 } 1846 case k_CoprocNum: 1847 OS << "<coprocessor number: " << getCoproc() << ">"; 1848 break; 1849 case k_CoprocReg: 1850 OS << "<coprocessor register: " << getCoproc() << ">"; 1851 break; 1852 case k_CoprocOption: 1853 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1854 break; 1855 case k_MSRMask: 1856 OS << "<mask: " << getMSRMask() << ">"; 1857 break; 1858 case k_Immediate: 1859 getImm()->print(OS); 1860 break; 1861 case k_MemBarrierOpt: 1862 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1863 break; 1864 case k_Memory: 1865 OS << "<memory " 1866 << " base:" << Memory.BaseRegNum; 1867 OS << ">"; 1868 break; 1869 case k_PostIndexRegister: 1870 OS << "post-idx register " << (PostIdxReg.isAdd ? 
"" : "-") 1871 << PostIdxReg.RegNum; 1872 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1873 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1874 << PostIdxReg.ShiftImm; 1875 OS << ">"; 1876 break; 1877 case k_ProcIFlags: { 1878 OS << "<ARM_PROC::"; 1879 unsigned IFlags = getProcIFlags(); 1880 for (int i=2; i >= 0; --i) 1881 if (IFlags & (1 << i)) 1882 OS << ARM_PROC::IFlagsToString(1 << i); 1883 OS << ">"; 1884 break; 1885 } 1886 case k_Register: 1887 OS << "<register " << getReg() << ">"; 1888 break; 1889 case k_ShifterImmediate: 1890 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1891 << " #" << ShifterImm.Imm << ">"; 1892 break; 1893 case k_ShiftedRegister: 1894 OS << "<so_reg_reg " 1895 << RegShiftedReg.SrcReg 1896 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1897 << ", " << RegShiftedReg.ShiftReg << ", " 1898 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1899 << ">"; 1900 break; 1901 case k_ShiftedImmediate: 1902 OS << "<so_reg_imm " 1903 << RegShiftedImm.SrcReg 1904 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1905 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1906 << ">"; 1907 break; 1908 case k_RotateImmediate: 1909 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1910 break; 1911 case k_BitfieldDescriptor: 1912 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1913 << ", width: " << Bitfield.Width << ">"; 1914 break; 1915 case k_RegisterList: 1916 case k_DPRRegisterList: 1917 case k_SPRRegisterList: { 1918 OS << "<register_list "; 1919 1920 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1921 for (SmallVectorImpl<unsigned>::const_iterator 1922 I = RegList.begin(), E = RegList.end(); I != E; ) { 1923 OS << *I; 1924 if (++I < E) OS << ", "; 1925 } 1926 1927 OS << ">"; 1928 break; 1929 } 1930 case k_VectorList: 1931 OS << "<vector_list " << VectorList.Count << " * " 1932 << VectorList.RegNum << ">"; 1933 break; 1934 case k_Token: 1935 OS << "'" << getToken() << "'"; 1936 
break; 1937 case k_VectorIndex: 1938 OS << "<vectorindex " << getVectorIndex() << ">"; 1939 break; 1940 } 1941 } 1942 1943 /// @name Auto-generated Match Functions 1944 /// { 1945 1946 static unsigned MatchRegisterName(StringRef Name); 1947 1948 /// } 1949 1950 bool ARMAsmParser::ParseRegister(unsigned &RegNo, 1951 SMLoc &StartLoc, SMLoc &EndLoc) { 1952 RegNo = tryParseRegister(); 1953 1954 return (RegNo == (unsigned)-1); 1955 } 1956 1957 /// Try to parse a register name. The token must be an Identifier when called, 1958 /// and if it is a register name the token is eaten and the register number is 1959 /// returned. Otherwise return -1. 1960 /// 1961 int ARMAsmParser::tryParseRegister() { 1962 const AsmToken &Tok = Parser.getTok(); 1963 if (Tok.isNot(AsmToken::Identifier)) return -1; 1964 1965 // FIXME: Validate register for the current architecture; we have to do 1966 // validation later, so maybe there is no need for this here. 1967 std::string upperCase = Tok.getString().str(); 1968 std::string lowerCase = LowercaseString(upperCase); 1969 unsigned RegNum = MatchRegisterName(lowerCase); 1970 if (!RegNum) { 1971 RegNum = StringSwitch<unsigned>(lowerCase) 1972 .Case("r13", ARM::SP) 1973 .Case("r14", ARM::LR) 1974 .Case("r15", ARM::PC) 1975 .Case("ip", ARM::R12) 1976 .Default(0); 1977 } 1978 if (!RegNum) return -1; 1979 1980 Parser.Lex(); // Eat identifier token. 1981 1982 #if 0 1983 // Also check for an index operand. This is only legal for vector registers, 1984 // but that'll get caught OK in operand matching, so we don't need to 1985 // explicitly filter everything else out here. 1986 if (Parser.getTok().is(AsmToken::LBrac)) { 1987 SMLoc SIdx = Parser.getTok().getLoc(); 1988 Parser.Lex(); // Eat left bracket token. 
1989 1990 const MCExpr *ImmVal; 1991 if (getParser().ParseExpression(ImmVal)) 1992 return MatchOperand_ParseFail; 1993 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 1994 if (!MCE) { 1995 TokError("immediate value expected for vector index"); 1996 return MatchOperand_ParseFail; 1997 } 1998 1999 SMLoc E = Parser.getTok().getLoc(); 2000 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2001 Error(E, "']' expected"); 2002 return MatchOperand_ParseFail; 2003 } 2004 2005 Parser.Lex(); // Eat right bracket token. 2006 2007 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2008 SIdx, E, 2009 getContext())); 2010 } 2011 #endif 2012 2013 return RegNum; 2014 } 2015 2016 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2017 // If a recoverable error occurs, return 1. If an irrecoverable error 2018 // occurs, return -1. An irrecoverable error is one where tokens have been 2019 // consumed in the process of trying to parse the shifter (i.e., when it is 2020 // indeed a shifter operand, but malformed). 2021 int ARMAsmParser::tryParseShiftRegister( 2022 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2023 SMLoc S = Parser.getTok().getLoc(); 2024 const AsmToken &Tok = Parser.getTok(); 2025 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2026 2027 std::string upperCase = Tok.getString().str(); 2028 std::string lowerCase = LowercaseString(upperCase); 2029 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 2030 .Case("lsl", ARM_AM::lsl) 2031 .Case("lsr", ARM_AM::lsr) 2032 .Case("asr", ARM_AM::asr) 2033 .Case("ror", ARM_AM::ror) 2034 .Case("rrx", ARM_AM::rrx) 2035 .Default(ARM_AM::no_shift); 2036 2037 if (ShiftTy == ARM_AM::no_shift) 2038 return 1; 2039 2040 Parser.Lex(); // Eat the operator. 2041 2042 // The source register for the shift has already been added to the 2043 // operand list, so we need to pop it off and combine it into the shifted 2044 // register operand instead. 
2045 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 2046 if (!PrevOp->isReg()) 2047 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 2048 int SrcReg = PrevOp->getReg(); 2049 int64_t Imm = 0; 2050 int ShiftReg = 0; 2051 if (ShiftTy == ARM_AM::rrx) { 2052 // RRX Doesn't have an explicit shift amount. The encoder expects 2053 // the shift register to be the same as the source register. Seems odd, 2054 // but OK. 2055 ShiftReg = SrcReg; 2056 } else { 2057 // Figure out if this is shifted by a constant or a register (for non-RRX). 2058 if (Parser.getTok().is(AsmToken::Hash)) { 2059 Parser.Lex(); // Eat hash. 2060 SMLoc ImmLoc = Parser.getTok().getLoc(); 2061 const MCExpr *ShiftExpr = 0; 2062 if (getParser().ParseExpression(ShiftExpr)) { 2063 Error(ImmLoc, "invalid immediate shift value"); 2064 return -1; 2065 } 2066 // The expression must be evaluatable as an immediate. 2067 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 2068 if (!CE) { 2069 Error(ImmLoc, "invalid immediate shift value"); 2070 return -1; 2071 } 2072 // Range check the immediate. 
2073 // lsl, ror: 0 <= imm <= 31 2074 // lsr, asr: 0 <= imm <= 32 2075 Imm = CE->getValue(); 2076 if (Imm < 0 || 2077 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 2078 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 2079 Error(ImmLoc, "immediate shift value out of range"); 2080 return -1; 2081 } 2082 } else if (Parser.getTok().is(AsmToken::Identifier)) { 2083 ShiftReg = tryParseRegister(); 2084 SMLoc L = Parser.getTok().getLoc(); 2085 if (ShiftReg == -1) { 2086 Error (L, "expected immediate or register in shift operand"); 2087 return -1; 2088 } 2089 } else { 2090 Error (Parser.getTok().getLoc(), 2091 "expected immediate or register in shift operand"); 2092 return -1; 2093 } 2094 } 2095 2096 if (ShiftReg && ShiftTy != ARM_AM::rrx) 2097 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 2098 ShiftReg, Imm, 2099 S, Parser.getTok().getLoc())); 2100 else 2101 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 2102 S, Parser.getTok().getLoc())); 2103 2104 return 0; 2105 } 2106 2107 2108 /// Try to parse a register name. The token must be an Identifier when called. 2109 /// If it's a register, an AsmOperand is created. Another AsmOperand is created 2110 /// if there is a "writeback". 'true' if it's not a register. 2111 /// 2112 /// TODO this is likely to change to allow different register types and or to 2113 /// parse for a specific register type. 
2114 bool ARMAsmParser:: 2115 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2116 SMLoc S = Parser.getTok().getLoc(); 2117 int RegNo = tryParseRegister(); 2118 if (RegNo == -1) 2119 return true; 2120 2121 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2122 2123 const AsmToken &ExclaimTok = Parser.getTok(); 2124 if (ExclaimTok.is(AsmToken::Exclaim)) { 2125 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2126 ExclaimTok.getLoc())); 2127 Parser.Lex(); // Eat exclaim token 2128 return false; 2129 } 2130 2131 // Also check for an index operand. This is only legal for vector registers, 2132 // but that'll get caught OK in operand matching, so we don't need to 2133 // explicitly filter everything else out here. 2134 if (Parser.getTok().is(AsmToken::LBrac)) { 2135 SMLoc SIdx = Parser.getTok().getLoc(); 2136 Parser.Lex(); // Eat left bracket token. 2137 2138 const MCExpr *ImmVal; 2139 if (getParser().ParseExpression(ImmVal)) 2140 return MatchOperand_ParseFail; 2141 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2142 if (!MCE) { 2143 TokError("immediate value expected for vector index"); 2144 return MatchOperand_ParseFail; 2145 } 2146 2147 SMLoc E = Parser.getTok().getLoc(); 2148 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2149 Error(E, "']' expected"); 2150 return MatchOperand_ParseFail; 2151 } 2152 2153 Parser.Lex(); // Eat right bracket token. 2154 2155 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2156 SIdx, E, 2157 getContext())); 2158 } 2159 2160 return false; 2161 } 2162 2163 /// MatchCoprocessorOperandName - Try to parse an coprocessor related 2164 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2165 /// "c5", ... 2166 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2167 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2168 // but efficient. 
2169 switch (Name.size()) { 2170 default: break; 2171 case 2: 2172 if (Name[0] != CoprocOp) 2173 return -1; 2174 switch (Name[1]) { 2175 default: return -1; 2176 case '0': return 0; 2177 case '1': return 1; 2178 case '2': return 2; 2179 case '3': return 3; 2180 case '4': return 4; 2181 case '5': return 5; 2182 case '6': return 6; 2183 case '7': return 7; 2184 case '8': return 8; 2185 case '9': return 9; 2186 } 2187 break; 2188 case 3: 2189 if (Name[0] != CoprocOp || Name[1] != '1') 2190 return -1; 2191 switch (Name[2]) { 2192 default: return -1; 2193 case '0': return 10; 2194 case '1': return 11; 2195 case '2': return 12; 2196 case '3': return 13; 2197 case '4': return 14; 2198 case '5': return 15; 2199 } 2200 break; 2201 } 2202 2203 return -1; 2204 } 2205 2206 /// parseITCondCode - Try to parse a condition code for an IT instruction. 2207 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2208 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2209 SMLoc S = Parser.getTok().getLoc(); 2210 const AsmToken &Tok = Parser.getTok(); 2211 if (!Tok.is(AsmToken::Identifier)) 2212 return MatchOperand_NoMatch; 2213 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2214 .Case("eq", ARMCC::EQ) 2215 .Case("ne", ARMCC::NE) 2216 .Case("hs", ARMCC::HS) 2217 .Case("cs", ARMCC::HS) 2218 .Case("lo", ARMCC::LO) 2219 .Case("cc", ARMCC::LO) 2220 .Case("mi", ARMCC::MI) 2221 .Case("pl", ARMCC::PL) 2222 .Case("vs", ARMCC::VS) 2223 .Case("vc", ARMCC::VC) 2224 .Case("hi", ARMCC::HI) 2225 .Case("ls", ARMCC::LS) 2226 .Case("ge", ARMCC::GE) 2227 .Case("lt", ARMCC::LT) 2228 .Case("gt", ARMCC::GT) 2229 .Case("le", ARMCC::LE) 2230 .Case("al", ARMCC::AL) 2231 .Default(~0U); 2232 if (CC == ~0U) 2233 return MatchOperand_NoMatch; 2234 Parser.Lex(); // Eat the token. 2235 2236 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2237 2238 return MatchOperand_Success; 2239 } 2240 2241 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2242 /// token must be an Identifier when called, and if it is a coprocessor 2243 /// number, the token is eaten and the operand is added to the operand list. 2244 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2245 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2246 SMLoc S = Parser.getTok().getLoc(); 2247 const AsmToken &Tok = Parser.getTok(); 2248 if (Tok.isNot(AsmToken::Identifier)) 2249 return MatchOperand_NoMatch; 2250 2251 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2252 if (Num == -1) 2253 return MatchOperand_NoMatch; 2254 2255 Parser.Lex(); // Eat identifier token. 2256 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2257 return MatchOperand_Success; 2258 } 2259 2260 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2261 /// token must be an Identifier when called, and if it is a coprocessor 2262 /// number, the token is eaten and the operand is added to the operand list. 2263 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2264 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2265 SMLoc S = Parser.getTok().getLoc(); 2266 const AsmToken &Tok = Parser.getTok(); 2267 if (Tok.isNot(AsmToken::Identifier)) 2268 return MatchOperand_NoMatch; 2269 2270 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2271 if (Reg == -1) 2272 return MatchOperand_NoMatch; 2273 2274 Parser.Lex(); // Eat identifier token. 2275 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2276 return MatchOperand_Success; 2277 } 2278 2279 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2280 /// coproc_option : '{' imm0_255 '}' 2281 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2282 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2283 SMLoc S = Parser.getTok().getLoc(); 2284 2285 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2286 if (Parser.getTok().isNot(AsmToken::LCurly)) 2287 return MatchOperand_NoMatch; 2288 Parser.Lex(); // Eat the '{' 2289 2290 const MCExpr *Expr; 2291 SMLoc Loc = Parser.getTok().getLoc(); 2292 if (getParser().ParseExpression(Expr)) { 2293 Error(Loc, "illegal expression"); 2294 return MatchOperand_ParseFail; 2295 } 2296 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2297 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2298 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2299 return MatchOperand_ParseFail; 2300 } 2301 int Val = CE->getValue(); 2302 2303 // Check for and consume the closing '}' 2304 if (Parser.getTok().isNot(AsmToken::RCurly)) 2305 return MatchOperand_ParseFail; 2306 SMLoc E = Parser.getTok().getLoc(); 2307 Parser.Lex(); // Eat the '}' 2308 2309 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2310 return MatchOperand_Success; 2311 } 2312 2313 // For register list parsing, we need to map from raw GPR register numbering 2314 // to the enumeration values. The enumeration values aren't sorted by 2315 // register number due to our using "sp", "lr" and "pc" as canonical names. 2316 static unsigned getNextRegister(unsigned Reg) { 2317 // If this is a GPR, we need to do it manually, otherwise we can rely 2318 // on the sort ordering of the enumeration since the other reg-classes 2319 // are sane. 
2320 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2321 return Reg + 1; 2322 switch(Reg) { 2323 default: assert(0 && "Invalid GPR number!"); 2324 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2325 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2326 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2327 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2328 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2329 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2330 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2331 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2332 } 2333 } 2334 2335 /// Parse a register list. 2336 bool ARMAsmParser:: 2337 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2338 assert(Parser.getTok().is(AsmToken::LCurly) && 2339 "Token is not a Left Curly Brace"); 2340 SMLoc S = Parser.getTok().getLoc(); 2341 Parser.Lex(); // Eat '{' token. 2342 SMLoc RegLoc = Parser.getTok().getLoc(); 2343 2344 // Check the first register in the list to see what register class 2345 // this is a list of. 2346 int Reg = tryParseRegister(); 2347 if (Reg == -1) 2348 return Error(RegLoc, "register expected"); 2349 2350 MCRegisterClass *RC; 2351 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2352 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2353 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2354 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2355 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2356 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2357 else 2358 return Error(RegLoc, "invalid register in register list"); 2359 2360 // The reglist instructions have at most 16 registers, so reserve 2361 // space for that many. 2362 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2363 // Store the first register. 
2364 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2365 2366 // This starts immediately after the first register token in the list, 2367 // so we can see either a comma or a minus (range separator) as a legal 2368 // next token. 2369 while (Parser.getTok().is(AsmToken::Comma) || 2370 Parser.getTok().is(AsmToken::Minus)) { 2371 if (Parser.getTok().is(AsmToken::Minus)) { 2372 Parser.Lex(); // Eat the comma. 2373 SMLoc EndLoc = Parser.getTok().getLoc(); 2374 int EndReg = tryParseRegister(); 2375 if (EndReg == -1) 2376 return Error(EndLoc, "register expected"); 2377 // If the register is the same as the start reg, there's nothing 2378 // more to do. 2379 if (Reg == EndReg) 2380 continue; 2381 // The register must be in the same register class as the first. 2382 if (!RC->contains(EndReg)) 2383 return Error(EndLoc, "invalid register in register list"); 2384 // Ranges must go from low to high. 2385 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2386 return Error(EndLoc, "bad range in register list"); 2387 2388 // Add all the registers in the range to the register list. 2389 while (Reg != EndReg) { 2390 Reg = getNextRegister(Reg); 2391 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2392 } 2393 continue; 2394 } 2395 Parser.Lex(); // Eat the comma. 2396 RegLoc = Parser.getTok().getLoc(); 2397 int OldReg = Reg; 2398 Reg = tryParseRegister(); 2399 if (Reg == -1) 2400 return Error(RegLoc, "register expected"); 2401 // The register must be in the same register class as the first. 2402 if (!RC->contains(Reg)) 2403 return Error(RegLoc, "invalid register in register list"); 2404 // List must be monotonically increasing. 2405 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2406 return Error(RegLoc, "register list not in ascending order"); 2407 // VFP register lists must also be contiguous. 
2408 // It's OK to use the enumeration values directly here rather, as the 2409 // VFP register classes have the enum sorted properly. 2410 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2411 Reg != OldReg + 1) 2412 return Error(RegLoc, "non-contiguous register range"); 2413 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2414 } 2415 2416 SMLoc E = Parser.getTok().getLoc(); 2417 if (Parser.getTok().isNot(AsmToken::RCurly)) 2418 return Error(E, "'}' expected"); 2419 Parser.Lex(); // Eat '}' token. 2420 2421 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2422 return false; 2423 } 2424 2425 // parse a vector register list 2426 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2427 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2428 if(Parser.getTok().isNot(AsmToken::LCurly)) 2429 return MatchOperand_NoMatch; 2430 2431 SMLoc S = Parser.getTok().getLoc(); 2432 Parser.Lex(); // Eat '{' token. 2433 SMLoc RegLoc = Parser.getTok().getLoc(); 2434 2435 int Reg = tryParseRegister(); 2436 if (Reg == -1) { 2437 Error(RegLoc, "register expected"); 2438 return MatchOperand_ParseFail; 2439 } 2440 2441 unsigned FirstReg = Reg; 2442 unsigned Count = 1; 2443 while (Parser.getTok().is(AsmToken::Comma)) { 2444 Parser.Lex(); // Eat the comma. 2445 RegLoc = Parser.getTok().getLoc(); 2446 int OldReg = Reg; 2447 Reg = tryParseRegister(); 2448 if (Reg == -1) { 2449 Error(RegLoc, "register expected"); 2450 return MatchOperand_ParseFail; 2451 } 2452 // vector register lists must also be contiguous. 2453 // It's OK to use the enumeration values directly here rather, as the 2454 // VFP register classes have the enum sorted properly. 
2455 if (Reg != OldReg + 1) { 2456 Error(RegLoc, "non-contiguous register range"); 2457 return MatchOperand_ParseFail; 2458 } 2459 2460 ++Count; 2461 } 2462 2463 SMLoc E = Parser.getTok().getLoc(); 2464 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2465 Error(E, "'}' expected"); 2466 return MatchOperand_ParseFail; 2467 } 2468 Parser.Lex(); // Eat '}' token. 2469 2470 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2471 return MatchOperand_Success; 2472 } 2473 2474 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2475 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2476 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2477 SMLoc S = Parser.getTok().getLoc(); 2478 const AsmToken &Tok = Parser.getTok(); 2479 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2480 StringRef OptStr = Tok.getString(); 2481 2482 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2483 .Case("sy", ARM_MB::SY) 2484 .Case("st", ARM_MB::ST) 2485 .Case("sh", ARM_MB::ISH) 2486 .Case("ish", ARM_MB::ISH) 2487 .Case("shst", ARM_MB::ISHST) 2488 .Case("ishst", ARM_MB::ISHST) 2489 .Case("nsh", ARM_MB::NSH) 2490 .Case("un", ARM_MB::NSH) 2491 .Case("nshst", ARM_MB::NSHST) 2492 .Case("unst", ARM_MB::NSHST) 2493 .Case("osh", ARM_MB::OSH) 2494 .Case("oshst", ARM_MB::OSHST) 2495 .Default(~0U); 2496 2497 if (Opt == ~0U) 2498 return MatchOperand_NoMatch; 2499 2500 Parser.Lex(); // Eat identifier token. 2501 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2502 return MatchOperand_Success; 2503 } 2504 2505 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 
2506 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2507 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2508 SMLoc S = Parser.getTok().getLoc(); 2509 const AsmToken &Tok = Parser.getTok(); 2510 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2511 StringRef IFlagsStr = Tok.getString(); 2512 2513 // An iflags string of "none" is interpreted to mean that none of the AIF 2514 // bits are set. Not a terribly useful instruction, but a valid encoding. 2515 unsigned IFlags = 0; 2516 if (IFlagsStr != "none") { 2517 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2518 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2519 .Case("a", ARM_PROC::A) 2520 .Case("i", ARM_PROC::I) 2521 .Case("f", ARM_PROC::F) 2522 .Default(~0U); 2523 2524 // If some specific iflag is already set, it means that some letter is 2525 // present more than once, this is not acceptable. 2526 if (Flag == ~0U || (IFlags & Flag)) 2527 return MatchOperand_NoMatch; 2528 2529 IFlags |= Flag; 2530 } 2531 } 2532 2533 Parser.Lex(); // Eat identifier token. 2534 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2535 return MatchOperand_Success; 2536 } 2537 2538 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2539 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2540 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2541 SMLoc S = Parser.getTok().getLoc(); 2542 const AsmToken &Tok = Parser.getTok(); 2543 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2544 StringRef Mask = Tok.getString(); 2545 2546 if (isMClass()) { 2547 // See ARMv6-M 10.1.1 2548 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2549 .Case("apsr", 0) 2550 .Case("iapsr", 1) 2551 .Case("eapsr", 2) 2552 .Case("xpsr", 3) 2553 .Case("ipsr", 5) 2554 .Case("epsr", 6) 2555 .Case("iepsr", 7) 2556 .Case("msp", 8) 2557 .Case("psp", 9) 2558 .Case("primask", 16) 2559 .Case("basepri", 17) 2560 .Case("basepri_max", 18) 2561 .Case("faultmask", 19) 2562 .Case("control", 20) 2563 .Default(~0U); 2564 2565 if (FlagsVal == ~0U) 2566 return MatchOperand_NoMatch; 2567 2568 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2569 // basepri, basepri_max and faultmask only valid for V7m. 2570 return MatchOperand_NoMatch; 2571 2572 Parser.Lex(); // Eat identifier token. 
2573 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2574 return MatchOperand_Success; 2575 } 2576 2577 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2578 size_t Start = 0, Next = Mask.find('_'); 2579 StringRef Flags = ""; 2580 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2581 if (Next != StringRef::npos) 2582 Flags = Mask.slice(Next+1, Mask.size()); 2583 2584 // FlagsVal contains the complete mask: 2585 // 3-0: Mask 2586 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2587 unsigned FlagsVal = 0; 2588 2589 if (SpecReg == "apsr") { 2590 FlagsVal = StringSwitch<unsigned>(Flags) 2591 .Case("nzcvq", 0x8) // same as CPSR_f 2592 .Case("g", 0x4) // same as CPSR_s 2593 .Case("nzcvqg", 0xc) // same as CPSR_fs 2594 .Default(~0U); 2595 2596 if (FlagsVal == ~0U) { 2597 if (!Flags.empty()) 2598 return MatchOperand_NoMatch; 2599 else 2600 FlagsVal = 8; // No flag 2601 } 2602 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2603 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2604 Flags = "fc"; 2605 for (int i = 0, e = Flags.size(); i != e; ++i) { 2606 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2607 .Case("c", 1) 2608 .Case("x", 2) 2609 .Case("s", 4) 2610 .Case("f", 8) 2611 .Default(~0U); 2612 2613 // If some specific flag is already set, it means that some letter is 2614 // present more than once, this is not acceptable. 2615 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2616 return MatchOperand_NoMatch; 2617 FlagsVal |= Flag; 2618 } 2619 } else // No match for special register. 2620 return MatchOperand_NoMatch; 2621 2622 // Special register without flags are equivalent to "fc" flags. 2623 if (!FlagsVal) 2624 FlagsVal = 0x9; 2625 2626 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2627 if (SpecReg == "spsr") 2628 FlagsVal |= 16; 2629 2630 Parser.Lex(); // Eat identifier token. 
2631 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2632 return MatchOperand_Success; 2633 } 2634 2635 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2636 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2637 int Low, int High) { 2638 const AsmToken &Tok = Parser.getTok(); 2639 if (Tok.isNot(AsmToken::Identifier)) { 2640 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2641 return MatchOperand_ParseFail; 2642 } 2643 StringRef ShiftName = Tok.getString(); 2644 std::string LowerOp = LowercaseString(Op); 2645 std::string UpperOp = UppercaseString(Op); 2646 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2647 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2648 return MatchOperand_ParseFail; 2649 } 2650 Parser.Lex(); // Eat shift type token. 2651 2652 // There must be a '#' and a shift amount. 2653 if (Parser.getTok().isNot(AsmToken::Hash)) { 2654 Error(Parser.getTok().getLoc(), "'#' expected"); 2655 return MatchOperand_ParseFail; 2656 } 2657 Parser.Lex(); // Eat hash token. 
2658 2659 const MCExpr *ShiftAmount; 2660 SMLoc Loc = Parser.getTok().getLoc(); 2661 if (getParser().ParseExpression(ShiftAmount)) { 2662 Error(Loc, "illegal expression"); 2663 return MatchOperand_ParseFail; 2664 } 2665 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2666 if (!CE) { 2667 Error(Loc, "constant expression expected"); 2668 return MatchOperand_ParseFail; 2669 } 2670 int Val = CE->getValue(); 2671 if (Val < Low || Val > High) { 2672 Error(Loc, "immediate value out of range"); 2673 return MatchOperand_ParseFail; 2674 } 2675 2676 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2677 2678 return MatchOperand_Success; 2679 } 2680 2681 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2682 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2683 const AsmToken &Tok = Parser.getTok(); 2684 SMLoc S = Tok.getLoc(); 2685 if (Tok.isNot(AsmToken::Identifier)) { 2686 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2687 return MatchOperand_ParseFail; 2688 } 2689 int Val = StringSwitch<int>(Tok.getString()) 2690 .Case("be", 1) 2691 .Case("le", 0) 2692 .Default(-1); 2693 Parser.Lex(); // Eat the token. 2694 2695 if (Val == -1) { 2696 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2697 return MatchOperand_ParseFail; 2698 } 2699 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2700 getContext()), 2701 S, Parser.getTok().getLoc())); 2702 return MatchOperand_Success; 2703 } 2704 2705 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2706 /// instructions. Legal values are: 2707 /// lsl #n 'n' in [0,31] 2708 /// asr #n 'n' in [1,32] 2709 /// n == 32 encoded as n == 0. 
2710 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2711 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2712 const AsmToken &Tok = Parser.getTok(); 2713 SMLoc S = Tok.getLoc(); 2714 if (Tok.isNot(AsmToken::Identifier)) { 2715 Error(S, "shift operator 'asr' or 'lsl' expected"); 2716 return MatchOperand_ParseFail; 2717 } 2718 StringRef ShiftName = Tok.getString(); 2719 bool isASR; 2720 if (ShiftName == "lsl" || ShiftName == "LSL") 2721 isASR = false; 2722 else if (ShiftName == "asr" || ShiftName == "ASR") 2723 isASR = true; 2724 else { 2725 Error(S, "shift operator 'asr' or 'lsl' expected"); 2726 return MatchOperand_ParseFail; 2727 } 2728 Parser.Lex(); // Eat the operator. 2729 2730 // A '#' and a shift amount. 2731 if (Parser.getTok().isNot(AsmToken::Hash)) { 2732 Error(Parser.getTok().getLoc(), "'#' expected"); 2733 return MatchOperand_ParseFail; 2734 } 2735 Parser.Lex(); // Eat hash token. 2736 2737 const MCExpr *ShiftAmount; 2738 SMLoc E = Parser.getTok().getLoc(); 2739 if (getParser().ParseExpression(ShiftAmount)) { 2740 Error(E, "malformed shift expression"); 2741 return MatchOperand_ParseFail; 2742 } 2743 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2744 if (!CE) { 2745 Error(E, "shift amount must be an immediate"); 2746 return MatchOperand_ParseFail; 2747 } 2748 2749 int64_t Val = CE->getValue(); 2750 if (isASR) { 2751 // Shift amount must be in [1,32] 2752 if (Val < 1 || Val > 32) { 2753 Error(E, "'asr' shift amount must be in range [1,32]"); 2754 return MatchOperand_ParseFail; 2755 } 2756 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2757 if (isThumb() && Val == 32) { 2758 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2759 return MatchOperand_ParseFail; 2760 } 2761 if (Val == 32) Val = 0; 2762 } else { 2763 // Shift amount must be in [1,32] 2764 if (Val < 0 || Val > 31) { 2765 Error(E, "'lsr' shift amount must be in range [0,31]"); 2766 return MatchOperand_ParseFail; 2767 } 2768 } 2769 2770 E = Parser.getTok().getLoc(); 2771 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2772 2773 return MatchOperand_Success; 2774 } 2775 2776 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2777 /// of instructions. Legal values are: 2778 /// ror #n 'n' in {0, 8, 16, 24} 2779 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2780 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2781 const AsmToken &Tok = Parser.getTok(); 2782 SMLoc S = Tok.getLoc(); 2783 if (Tok.isNot(AsmToken::Identifier)) 2784 return MatchOperand_NoMatch; 2785 StringRef ShiftName = Tok.getString(); 2786 if (ShiftName != "ror" && ShiftName != "ROR") 2787 return MatchOperand_NoMatch; 2788 Parser.Lex(); // Eat the operator. 2789 2790 // A '#' and a rotate amount. 2791 if (Parser.getTok().isNot(AsmToken::Hash)) { 2792 Error(Parser.getTok().getLoc(), "'#' expected"); 2793 return MatchOperand_ParseFail; 2794 } 2795 Parser.Lex(); // Eat hash token. 2796 2797 const MCExpr *ShiftAmount; 2798 SMLoc E = Parser.getTok().getLoc(); 2799 if (getParser().ParseExpression(ShiftAmount)) { 2800 Error(E, "malformed rotate expression"); 2801 return MatchOperand_ParseFail; 2802 } 2803 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2804 if (!CE) { 2805 Error(E, "rotate amount must be an immediate"); 2806 return MatchOperand_ParseFail; 2807 } 2808 2809 int64_t Val = CE->getValue(); 2810 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2811 // normally, zero is represented in asm by omitting the rotate operand 2812 // entirely. 
2813 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2814 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2815 return MatchOperand_ParseFail; 2816 } 2817 2818 E = Parser.getTok().getLoc(); 2819 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2820 2821 return MatchOperand_Success; 2822 } 2823 2824 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2825 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2826 SMLoc S = Parser.getTok().getLoc(); 2827 // The bitfield descriptor is really two operands, the LSB and the width. 2828 if (Parser.getTok().isNot(AsmToken::Hash)) { 2829 Error(Parser.getTok().getLoc(), "'#' expected"); 2830 return MatchOperand_ParseFail; 2831 } 2832 Parser.Lex(); // Eat hash token. 2833 2834 const MCExpr *LSBExpr; 2835 SMLoc E = Parser.getTok().getLoc(); 2836 if (getParser().ParseExpression(LSBExpr)) { 2837 Error(E, "malformed immediate expression"); 2838 return MatchOperand_ParseFail; 2839 } 2840 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2841 if (!CE) { 2842 Error(E, "'lsb' operand must be an immediate"); 2843 return MatchOperand_ParseFail; 2844 } 2845 2846 int64_t LSB = CE->getValue(); 2847 // The LSB must be in the range [0,31] 2848 if (LSB < 0 || LSB > 31) { 2849 Error(E, "'lsb' operand must be in the range [0,31]"); 2850 return MatchOperand_ParseFail; 2851 } 2852 E = Parser.getTok().getLoc(); 2853 2854 // Expect another immediate operand. 2855 if (Parser.getTok().isNot(AsmToken::Comma)) { 2856 Error(Parser.getTok().getLoc(), "too few operands"); 2857 return MatchOperand_ParseFail; 2858 } 2859 Parser.Lex(); // Eat hash token. 2860 if (Parser.getTok().isNot(AsmToken::Hash)) { 2861 Error(Parser.getTok().getLoc(), "'#' expected"); 2862 return MatchOperand_ParseFail; 2863 } 2864 Parser.Lex(); // Eat hash token. 
2865 2866 const MCExpr *WidthExpr; 2867 if (getParser().ParseExpression(WidthExpr)) { 2868 Error(E, "malformed immediate expression"); 2869 return MatchOperand_ParseFail; 2870 } 2871 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2872 if (!CE) { 2873 Error(E, "'width' operand must be an immediate"); 2874 return MatchOperand_ParseFail; 2875 } 2876 2877 int64_t Width = CE->getValue(); 2878 // The LSB must be in the range [1,32-lsb] 2879 if (Width < 1 || Width > 32 - LSB) { 2880 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2881 return MatchOperand_ParseFail; 2882 } 2883 E = Parser.getTok().getLoc(); 2884 2885 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2886 2887 return MatchOperand_Success; 2888 } 2889 2890 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2891 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2892 // Check for a post-index addressing register operand. Specifically: 2893 // postidx_reg := '+' register {, shift} 2894 // | '-' register {, shift} 2895 // | register {, shift} 2896 2897 // This method must return MatchOperand_NoMatch without consuming any tokens 2898 // in the case where there is no match, as other alternatives take other 2899 // parse methods. 2900 AsmToken Tok = Parser.getTok(); 2901 SMLoc S = Tok.getLoc(); 2902 bool haveEaten = false; 2903 bool isAdd = true; 2904 int Reg = -1; 2905 if (Tok.is(AsmToken::Plus)) { 2906 Parser.Lex(); // Eat the '+' token. 2907 haveEaten = true; 2908 } else if (Tok.is(AsmToken::Minus)) { 2909 Parser.Lex(); // Eat the '-' token. 
2910 isAdd = false; 2911 haveEaten = true; 2912 } 2913 if (Parser.getTok().is(AsmToken::Identifier)) 2914 Reg = tryParseRegister(); 2915 if (Reg == -1) { 2916 if (!haveEaten) 2917 return MatchOperand_NoMatch; 2918 Error(Parser.getTok().getLoc(), "register expected"); 2919 return MatchOperand_ParseFail; 2920 } 2921 SMLoc E = Parser.getTok().getLoc(); 2922 2923 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2924 unsigned ShiftImm = 0; 2925 if (Parser.getTok().is(AsmToken::Comma)) { 2926 Parser.Lex(); // Eat the ','. 2927 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2928 return MatchOperand_ParseFail; 2929 } 2930 2931 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2932 ShiftImm, S, E)); 2933 2934 return MatchOperand_Success; 2935 } 2936 2937 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2938 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2939 // Check for a post-index addressing register operand. Specifically: 2940 // am3offset := '+' register 2941 // | '-' register 2942 // | register 2943 // | # imm 2944 // | # + imm 2945 // | # - imm 2946 2947 // This method must return MatchOperand_NoMatch without consuming any tokens 2948 // in the case where there is no match, as other alternatives take other 2949 // parse methods. 2950 AsmToken Tok = Parser.getTok(); 2951 SMLoc S = Tok.getLoc(); 2952 2953 // Do immediates first, as we always parse those if we have a '#'. 2954 if (Parser.getTok().is(AsmToken::Hash)) { 2955 Parser.Lex(); // Eat the '#'. 2956 // Explicitly look for a '-', as we need to encode negative zero 2957 // differently. 
2958 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2959 const MCExpr *Offset; 2960 if (getParser().ParseExpression(Offset)) 2961 return MatchOperand_ParseFail; 2962 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2963 if (!CE) { 2964 Error(S, "constant expression expected"); 2965 return MatchOperand_ParseFail; 2966 } 2967 SMLoc E = Tok.getLoc(); 2968 // Negative zero is encoded as the flag value INT32_MIN. 2969 int32_t Val = CE->getValue(); 2970 if (isNegative && Val == 0) 2971 Val = INT32_MIN; 2972 2973 Operands.push_back( 2974 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2975 2976 return MatchOperand_Success; 2977 } 2978 2979 2980 bool haveEaten = false; 2981 bool isAdd = true; 2982 int Reg = -1; 2983 if (Tok.is(AsmToken::Plus)) { 2984 Parser.Lex(); // Eat the '+' token. 2985 haveEaten = true; 2986 } else if (Tok.is(AsmToken::Minus)) { 2987 Parser.Lex(); // Eat the '-' token. 2988 isAdd = false; 2989 haveEaten = true; 2990 } 2991 if (Parser.getTok().is(AsmToken::Identifier)) 2992 Reg = tryParseRegister(); 2993 if (Reg == -1) { 2994 if (!haveEaten) 2995 return MatchOperand_NoMatch; 2996 Error(Parser.getTok().getLoc(), "register expected"); 2997 return MatchOperand_ParseFail; 2998 } 2999 SMLoc E = Parser.getTok().getLoc(); 3000 3001 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3002 0, S, E)); 3003 3004 return MatchOperand_Success; 3005 } 3006 3007 /// cvtT2LdrdPre - Convert parsed operands to MCInst. 3008 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3009 /// when they refer multiple MIOperands inside a single one. 3010 bool ARMAsmParser:: 3011 cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3012 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3013 // Rt, Rt2 3014 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3015 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3016 // Create a writeback register dummy placeholder. 
3017 Inst.addOperand(MCOperand::CreateReg(0)); 3018 // addr 3019 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3020 // pred 3021 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3022 return true; 3023 } 3024 3025 /// cvtT2StrdPre - Convert parsed operands to MCInst. 3026 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3027 /// when they refer multiple MIOperands inside a single one. 3028 bool ARMAsmParser:: 3029 cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3030 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3031 // Create a writeback register dummy placeholder. 3032 Inst.addOperand(MCOperand::CreateReg(0)); 3033 // Rt, Rt2 3034 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3035 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3036 // addr 3037 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3038 // pred 3039 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3040 return true; 3041 } 3042 3043 /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3044 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3045 /// when they refer multiple MIOperands inside a single one. 3046 bool ARMAsmParser:: 3047 cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3048 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3049 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3050 3051 // Create a writeback register dummy placeholder. 3052 Inst.addOperand(MCOperand::CreateImm(0)); 3053 3054 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3055 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3056 return true; 3057 } 3058 3059 /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3060 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3061 /// when they refer multiple MIOperands inside a single one. 
3062 bool ARMAsmParser:: 3063 cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3064 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3065 // Create a writeback register dummy placeholder. 3066 Inst.addOperand(MCOperand::CreateImm(0)); 3067 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3068 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3069 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3070 return true; 3071 } 3072 3073 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3074 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3075 /// when they refer multiple MIOperands inside a single one. 3076 bool ARMAsmParser:: 3077 cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3078 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3079 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3080 3081 // Create a writeback register dummy placeholder. 3082 Inst.addOperand(MCOperand::CreateImm(0)); 3083 3084 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3085 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3086 return true; 3087 } 3088 3089 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3090 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3091 /// when they refer multiple MIOperands inside a single one. 3092 bool ARMAsmParser:: 3093 cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3094 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3095 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3096 3097 // Create a writeback register dummy placeholder. 3098 Inst.addOperand(MCOperand::CreateImm(0)); 3099 3100 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3101 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3102 return true; 3103 } 3104 3105 3106 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3107 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3108 /// when they refer multiple MIOperands inside a single one. 3109 bool ARMAsmParser:: 3110 cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3111 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3112 // Create a writeback register dummy placeholder. 3113 Inst.addOperand(MCOperand::CreateImm(0)); 3114 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3115 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3116 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3117 return true; 3118 } 3119 3120 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3121 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3122 /// when they refer multiple MIOperands inside a single one. 3123 bool ARMAsmParser:: 3124 cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3125 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3126 // Create a writeback register dummy placeholder. 3127 Inst.addOperand(MCOperand::CreateImm(0)); 3128 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3129 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3130 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3131 return true; 3132 } 3133 3134 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3135 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3136 /// when they refer multiple MIOperands inside a single one. 3137 bool ARMAsmParser:: 3138 cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3139 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3140 // Create a writeback register dummy placeholder. 
3141 Inst.addOperand(MCOperand::CreateImm(0)); 3142 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3143 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3144 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3145 return true; 3146 } 3147 3148 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3149 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3150 /// when they refer multiple MIOperands inside a single one. 3151 bool ARMAsmParser:: 3152 cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3153 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3154 // Rt 3155 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3156 // Create a writeback register dummy placeholder. 3157 Inst.addOperand(MCOperand::CreateImm(0)); 3158 // addr 3159 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3160 // offset 3161 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3162 // pred 3163 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3164 return true; 3165 } 3166 3167 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3168 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3169 /// when they refer multiple MIOperands inside a single one. 3170 bool ARMAsmParser:: 3171 cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3172 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3173 // Rt 3174 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3175 // Create a writeback register dummy placeholder. 3176 Inst.addOperand(MCOperand::CreateImm(0)); 3177 // addr 3178 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3179 // offset 3180 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3181 // pred 3182 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3183 return true; 3184 } 3185 3186 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3187 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3188 /// when they refer multiple MIOperands inside a single one. 3189 bool ARMAsmParser:: 3190 cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3191 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3192 // Create a writeback register dummy placeholder. 3193 Inst.addOperand(MCOperand::CreateImm(0)); 3194 // Rt 3195 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3196 // addr 3197 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3198 // offset 3199 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3200 // pred 3201 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3202 return true; 3203 } 3204 3205 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3206 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3207 /// when they refer multiple MIOperands inside a single one. 3208 bool ARMAsmParser:: 3209 cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3210 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3211 // Create a writeback register dummy placeholder. 3212 Inst.addOperand(MCOperand::CreateImm(0)); 3213 // Rt 3214 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3215 // addr 3216 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3217 // offset 3218 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3219 // pred 3220 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3221 return true; 3222 } 3223 3224 /// cvtLdrdPre - Convert parsed operands to MCInst. 3225 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3226 /// when they refer multiple MIOperands inside a single one. 
3227 bool ARMAsmParser:: 3228 cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3229 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3230 // Rt, Rt2 3231 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3232 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3233 // Create a writeback register dummy placeholder. 3234 Inst.addOperand(MCOperand::CreateImm(0)); 3235 // addr 3236 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3237 // pred 3238 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3239 return true; 3240 } 3241 3242 /// cvtStrdPre - Convert parsed operands to MCInst. 3243 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3244 /// when they refer multiple MIOperands inside a single one. 3245 bool ARMAsmParser:: 3246 cvtStrdPre(MCInst &Inst, unsigned Opcode, 3247 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3248 // Create a writeback register dummy placeholder. 3249 Inst.addOperand(MCOperand::CreateImm(0)); 3250 // Rt, Rt2 3251 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3252 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3253 // addr 3254 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3255 // pred 3256 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3257 return true; 3258 } 3259 3260 /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3261 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3262 /// when they refer multiple MIOperands inside a single one. 3263 bool ARMAsmParser:: 3264 cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3265 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3266 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3267 // Create a writeback register dummy placeholder. 
3268 Inst.addOperand(MCOperand::CreateImm(0)); 3269 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3270 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3271 return true; 3272 } 3273 3274 /// cvtThumbMultiple- Convert parsed operands to MCInst. 3275 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3276 /// when they refer multiple MIOperands inside a single one. 3277 bool ARMAsmParser:: 3278 cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3279 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3280 // The second source operand must be the same register as the destination 3281 // operand. 3282 if (Operands.size() == 6 && 3283 (((ARMOperand*)Operands[3])->getReg() != 3284 ((ARMOperand*)Operands[5])->getReg()) && 3285 (((ARMOperand*)Operands[3])->getReg() != 3286 ((ARMOperand*)Operands[4])->getReg())) { 3287 Error(Operands[3]->getStartLoc(), 3288 "destination register must match source register"); 3289 return false; 3290 } 3291 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3292 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3293 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3294 // If we have a three-operand form, use that, else the second source operand 3295 // is just the destination operand again. 3296 if (Operands.size() == 6) 3297 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3298 else 3299 Inst.addOperand(Inst.getOperand(0)); 3300 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3301 3302 return true; 3303 } 3304 3305 /// Parse an ARM memory expression, return false if successful else return true 3306 /// or an error. The first token must be a '[' when called. 3307 bool ARMAsmParser:: 3308 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3309 SMLoc S, E; 3310 assert(Parser.getTok().is(AsmToken::LBrac) && 3311 "Token is not a Left Bracket"); 3312 S = Parser.getTok().getLoc(); 3313 Parser.Lex(); // Eat left bracket token. 
3314 3315 const AsmToken &BaseRegTok = Parser.getTok(); 3316 int BaseRegNum = tryParseRegister(); 3317 if (BaseRegNum == -1) 3318 return Error(BaseRegTok.getLoc(), "register expected"); 3319 3320 // The next token must either be a comma or a closing bracket. 3321 const AsmToken &Tok = Parser.getTok(); 3322 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3323 return Error(Tok.getLoc(), "malformed memory operand"); 3324 3325 if (Tok.is(AsmToken::RBrac)) { 3326 E = Tok.getLoc(); 3327 Parser.Lex(); // Eat right bracket token. 3328 3329 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3330 0, 0, false, S, E)); 3331 3332 // If there's a pre-indexing writeback marker, '!', just add it as a token 3333 // operand. It's rather odd, but syntactically valid. 3334 if (Parser.getTok().is(AsmToken::Exclaim)) { 3335 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3336 Parser.Lex(); // Eat the '!'. 3337 } 3338 3339 return false; 3340 } 3341 3342 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3343 Parser.Lex(); // Eat the comma. 3344 3345 // If we have a ':', it's an alignment specifier. 3346 if (Parser.getTok().is(AsmToken::Colon)) { 3347 Parser.Lex(); // Eat the ':'. 3348 E = Parser.getTok().getLoc(); 3349 3350 const MCExpr *Expr; 3351 if (getParser().ParseExpression(Expr)) 3352 return true; 3353 3354 // The expression has to be a constant. Memory references with relocations 3355 // don't come through here, as they use the <label> forms of the relevant 3356 // instructions. 
3357 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3358 if (!CE) 3359 return Error (E, "constant expression expected"); 3360 3361 unsigned Align = 0; 3362 switch (CE->getValue()) { 3363 default: 3364 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3365 case 64: Align = 8; break; 3366 case 128: Align = 16; break; 3367 case 256: Align = 32; break; 3368 } 3369 3370 // Now we should have the closing ']' 3371 E = Parser.getTok().getLoc(); 3372 if (Parser.getTok().isNot(AsmToken::RBrac)) 3373 return Error(E, "']' expected"); 3374 Parser.Lex(); // Eat right bracket token. 3375 3376 // Don't worry about range checking the value here. That's handled by 3377 // the is*() predicates. 3378 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3379 ARM_AM::no_shift, 0, Align, 3380 false, S, E)); 3381 3382 // If there's a pre-indexing writeback marker, '!', just add it as a token 3383 // operand. 3384 if (Parser.getTok().is(AsmToken::Exclaim)) { 3385 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3386 Parser.Lex(); // Eat the '!'. 3387 } 3388 3389 return false; 3390 } 3391 3392 // If we have a '#', it's an immediate offset, else assume it's a register 3393 // offset. 3394 if (Parser.getTok().is(AsmToken::Hash)) { 3395 Parser.Lex(); // Eat the '#'. 3396 E = Parser.getTok().getLoc(); 3397 3398 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3399 const MCExpr *Offset; 3400 if (getParser().ParseExpression(Offset)) 3401 return true; 3402 3403 // The expression has to be a constant. Memory references with relocations 3404 // don't come through here, as they use the <label> forms of the relevant 3405 // instructions. 3406 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3407 if (!CE) 3408 return Error (E, "constant expression expected"); 3409 3410 // If the constant was #-0, represent it as INT32_MIN. 
3411 int32_t Val = CE->getValue(); 3412 if (isNegative && Val == 0) 3413 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3414 3415 // Now we should have the closing ']' 3416 E = Parser.getTok().getLoc(); 3417 if (Parser.getTok().isNot(AsmToken::RBrac)) 3418 return Error(E, "']' expected"); 3419 Parser.Lex(); // Eat right bracket token. 3420 3421 // Don't worry about range checking the value here. That's handled by 3422 // the is*() predicates. 3423 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3424 ARM_AM::no_shift, 0, 0, 3425 false, S, E)); 3426 3427 // If there's a pre-indexing writeback marker, '!', just add it as a token 3428 // operand. 3429 if (Parser.getTok().is(AsmToken::Exclaim)) { 3430 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3431 Parser.Lex(); // Eat the '!'. 3432 } 3433 3434 return false; 3435 } 3436 3437 // The register offset is optionally preceded by a '+' or '-' 3438 bool isNegative = false; 3439 if (Parser.getTok().is(AsmToken::Minus)) { 3440 isNegative = true; 3441 Parser.Lex(); // Eat the '-'. 3442 } else if (Parser.getTok().is(AsmToken::Plus)) { 3443 // Nothing to do. 3444 Parser.Lex(); // Eat the '+'. 3445 } 3446 3447 E = Parser.getTok().getLoc(); 3448 int OffsetRegNum = tryParseRegister(); 3449 if (OffsetRegNum == -1) 3450 return Error(E, "register expected"); 3451 3452 // If there's a shift operator, handle it. 3453 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3454 unsigned ShiftImm = 0; 3455 if (Parser.getTok().is(AsmToken::Comma)) { 3456 Parser.Lex(); // Eat the ','. 3457 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3458 return true; 3459 } 3460 3461 // Now we should have the closing ']' 3462 E = Parser.getTok().getLoc(); 3463 if (Parser.getTok().isNot(AsmToken::RBrac)) 3464 return Error(E, "']' expected"); 3465 Parser.Lex(); // Eat right bracket token. 
3466 3467 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3468 ShiftType, ShiftImm, 0, isNegative, 3469 S, E)); 3470 3471 // If there's a pre-indexing writeback marker, '!', just add it as a token 3472 // operand. 3473 if (Parser.getTok().is(AsmToken::Exclaim)) { 3474 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3475 Parser.Lex(); // Eat the '!'. 3476 } 3477 3478 return false; 3479 } 3480 3481 /// parseMemRegOffsetShift - one of these two: 3482 /// ( lsl | lsr | asr | ror ) , # shift_amount 3483 /// rrx 3484 /// return true if it parses a shift otherwise it returns false. 3485 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3486 unsigned &Amount) { 3487 SMLoc Loc = Parser.getTok().getLoc(); 3488 const AsmToken &Tok = Parser.getTok(); 3489 if (Tok.isNot(AsmToken::Identifier)) 3490 return true; 3491 StringRef ShiftName = Tok.getString(); 3492 if (ShiftName == "lsl" || ShiftName == "LSL") 3493 St = ARM_AM::lsl; 3494 else if (ShiftName == "lsr" || ShiftName == "LSR") 3495 St = ARM_AM::lsr; 3496 else if (ShiftName == "asr" || ShiftName == "ASR") 3497 St = ARM_AM::asr; 3498 else if (ShiftName == "ror" || ShiftName == "ROR") 3499 St = ARM_AM::ror; 3500 else if (ShiftName == "rrx" || ShiftName == "RRX") 3501 St = ARM_AM::rrx; 3502 else 3503 return Error(Loc, "illegal shift operator"); 3504 Parser.Lex(); // Eat shift type token. 3505 3506 // rrx stands alone. 3507 Amount = 0; 3508 if (St != ARM_AM::rrx) { 3509 Loc = Parser.getTok().getLoc(); 3510 // A '#' and a shift amount. 3511 const AsmToken &HashTok = Parser.getTok(); 3512 if (HashTok.isNot(AsmToken::Hash)) 3513 return Error(HashTok.getLoc(), "'#' expected"); 3514 Parser.Lex(); // Eat hash token. 3515 3516 const MCExpr *Expr; 3517 if (getParser().ParseExpression(Expr)) 3518 return true; 3519 // Range check the immediate. 
3520 // lsl, ror: 0 <= imm <= 31 3521 // lsr, asr: 0 <= imm <= 32 3522 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3523 if (!CE) 3524 return Error(Loc, "shift amount must be an immediate"); 3525 int64_t Imm = CE->getValue(); 3526 if (Imm < 0 || 3527 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3528 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3529 return Error(Loc, "immediate shift value out of range"); 3530 Amount = Imm; 3531 } 3532 3533 return false; 3534 } 3535 3536 /// parseFPImm - A floating point immediate expression operand. 3537 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3538 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3539 SMLoc S = Parser.getTok().getLoc(); 3540 3541 if (Parser.getTok().isNot(AsmToken::Hash)) 3542 return MatchOperand_NoMatch; 3543 3544 // Disambiguate the VMOV forms that can accept an FP immediate. 3545 // vmov.f32 <sreg>, #imm 3546 // vmov.f64 <dreg>, #imm 3547 // vmov.f32 <dreg>, #imm @ vector f32x2 3548 // vmov.f32 <qreg>, #imm @ vector f32x4 3549 // 3550 // There are also the NEON VMOV instructions which expect an 3551 // integer constant. Make sure we don't try to parse an FPImm 3552 // for these: 3553 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3554 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3555 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3556 TyOp->getToken() != ".f64")) 3557 return MatchOperand_NoMatch; 3558 3559 Parser.Lex(); // Eat the '#'. 3560 3561 // Handle negation, as that still comes through as a separate token. 3562 bool isNegative = false; 3563 if (Parser.getTok().is(AsmToken::Minus)) { 3564 isNegative = true; 3565 Parser.Lex(); 3566 } 3567 const AsmToken &Tok = Parser.getTok(); 3568 if (Tok.is(AsmToken::Real)) { 3569 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3570 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3571 // If we had a '-' in front, toggle the sign bit. 
3572 IntVal ^= (uint64_t)isNegative << 63; 3573 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3574 Parser.Lex(); // Eat the token. 3575 if (Val == -1) { 3576 TokError("floating point value out of range"); 3577 return MatchOperand_ParseFail; 3578 } 3579 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3580 return MatchOperand_Success; 3581 } 3582 if (Tok.is(AsmToken::Integer)) { 3583 int64_t Val = Tok.getIntVal(); 3584 Parser.Lex(); // Eat the token. 3585 if (Val > 255 || Val < 0) { 3586 TokError("encoded floating point value out of range"); 3587 return MatchOperand_ParseFail; 3588 } 3589 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3590 return MatchOperand_Success; 3591 } 3592 3593 TokError("invalid floating point immediate"); 3594 return MatchOperand_ParseFail; 3595 } 3596 /// Parse a arm instruction operand. For now this parses the operand regardless 3597 /// of the mnemonic. 3598 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3599 StringRef Mnemonic) { 3600 SMLoc S, E; 3601 3602 // Check if the current operand has a custom associated parser, if so, try to 3603 // custom parse the operand, or fallback to the general approach. 3604 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3605 if (ResTy == MatchOperand_Success) 3606 return false; 3607 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3608 // there was a match, but an error occurred, in which case, just return that 3609 // the operand parsing failed. 3610 if (ResTy == MatchOperand_ParseFail) 3611 return true; 3612 3613 switch (getLexer().getKind()) { 3614 default: 3615 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3616 return true; 3617 case AsmToken::Identifier: { 3618 // If this is VMRS, check for the apsr_nzcv operand. 
3619 if (!tryParseRegisterWithWriteBack(Operands)) 3620 return false; 3621 int Res = tryParseShiftRegister(Operands); 3622 if (Res == 0) // success 3623 return false; 3624 else if (Res == -1) // irrecoverable error 3625 return true; 3626 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3627 S = Parser.getTok().getLoc(); 3628 Parser.Lex(); 3629 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3630 return false; 3631 } 3632 3633 // Fall though for the Identifier case that is not a register or a 3634 // special name. 3635 } 3636 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3637 case AsmToken::Dot: { // . as a branch target 3638 // This was not a register so parse other operands that start with an 3639 // identifier (like labels) as expressions and create them as immediates. 3640 const MCExpr *IdVal; 3641 S = Parser.getTok().getLoc(); 3642 if (getParser().ParseExpression(IdVal)) 3643 return true; 3644 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3645 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3646 return false; 3647 } 3648 case AsmToken::LBrac: 3649 return parseMemory(Operands); 3650 case AsmToken::LCurly: 3651 return parseRegisterList(Operands); 3652 case AsmToken::Hash: { 3653 // #42 -> immediate. 
3654 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3655 S = Parser.getTok().getLoc(); 3656 Parser.Lex(); 3657 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3658 const MCExpr *ImmVal; 3659 if (getParser().ParseExpression(ImmVal)) 3660 return true; 3661 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3662 if (!CE) { 3663 Error(S, "constant expression expected"); 3664 return MatchOperand_ParseFail; 3665 } 3666 int32_t Val = CE->getValue(); 3667 if (isNegative && Val == 0) 3668 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3669 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3670 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3671 return false; 3672 } 3673 case AsmToken::Colon: { 3674 // ":lower16:" and ":upper16:" expression prefixes 3675 // FIXME: Check it's an expression prefix, 3676 // e.g. (FOO - :lower16:BAR) isn't legal. 3677 ARMMCExpr::VariantKind RefKind; 3678 if (parsePrefix(RefKind)) 3679 return true; 3680 3681 const MCExpr *SubExprVal; 3682 if (getParser().ParseExpression(SubExprVal)) 3683 return true; 3684 3685 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3686 getContext()); 3687 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3688 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3689 return false; 3690 } 3691 } 3692 } 3693 3694 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3695 // :lower16: and :upper16:. 
// Parse a ":lower16:" / ":upper16:" relocation-expression prefix. On entry
// the lexer is positioned at the leading ':'. Sets RefKind accordingly and
// consumes both colons and the identifier. Returns true (with an error
// emitted) on any malformed prefix.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
//
// Returns the stripped base mnemonic; the out-parameters receive the
// condition code (ARMCC::AL if none), whether an 's' suffix was present,
// the CPS interrupt-mode code (0 if none), and the IT condition-mask
// letters ("t"/"e" string, empty if not an IT instruction).
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  // These mnemonics happen to END in two letters that look like a condition
  // code ("eq", "ls", ...), so they must be excluded before the suffix scan.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   ||
      Mnemonic == "smlal" || Mnemonic == "umaal"  || Mnemonic == "umlal" ||
      Mnemonic == "vabal" || Mnemonic == "vmlal"  || Mnemonic == "vpadal" ||
      Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
3826 void ARMAsmParser:: 3827 getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3828 bool &CanAcceptPredicationCode) { 3829 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3830 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3831 Mnemonic == "add" || Mnemonic == "adc" || 3832 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3833 Mnemonic == "orr" || Mnemonic == "mvn" || 3834 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3835 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3836 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3837 Mnemonic == "mla" || Mnemonic == "smlal" || 3838 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3839 CanAcceptCarrySet = true; 3840 } else 3841 CanAcceptCarrySet = false; 3842 3843 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3844 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3845 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3846 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3847 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3848 (Mnemonic == "clrex" && !isThumb()) || 3849 (Mnemonic == "nop" && isThumbOne()) || 3850 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 3851 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 3852 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 3853 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3854 !isThumb()) || 3855 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3856 CanAcceptPredicationCode = false; 3857 } else 3858 CanAcceptPredicationCode = true; 3859 3860 if (isThumb()) { 3861 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3862 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3863 CanAcceptPredicationCode = false; 3864 } 3865 } 3866 3867 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 
3868 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3869 // FIXME: This is all horribly hacky. We really need a better way to deal 3870 // with optional operands like this in the matcher table. 3871 3872 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3873 // another does not. Specifically, the MOVW instruction does not. So we 3874 // special case it here and remove the defaulted (non-setting) cc_out 3875 // operand if that's the instruction we're trying to match. 3876 // 3877 // We do this as post-processing of the explicit operands rather than just 3878 // conditionally adding the cc_out in the first place because we need 3879 // to check the type of the parsed immediate operand. 3880 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3881 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3882 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3883 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3884 return true; 3885 3886 // Register-register 'add' for thumb does not have a cc_out operand 3887 // when there are only two register operands. 3888 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 3889 static_cast<ARMOperand*>(Operands[3])->isReg() && 3890 static_cast<ARMOperand*>(Operands[4])->isReg() && 3891 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3892 return true; 3893 // Register-register 'add' for thumb does not have a cc_out operand 3894 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 3895 // have to check the immediate range here since Thumb2 has a variant 3896 // that can handle a different range and has a cc_out operand. 
3897 if (((isThumb() && Mnemonic == "add") || 3898 (isThumbTwo() && Mnemonic == "sub")) && 3899 Operands.size() == 6 && 3900 static_cast<ARMOperand*>(Operands[3])->isReg() && 3901 static_cast<ARMOperand*>(Operands[4])->isReg() && 3902 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 3903 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3904 (static_cast<ARMOperand*>(Operands[5])->isReg() || 3905 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 3906 return true; 3907 // For Thumb2, add/sub immediate does not have a cc_out operand for the 3908 // imm0_4095 variant. That's the least-preferred variant when 3909 // selecting via the generic "add" mnemonic, so to know that we 3910 // should remove the cc_out operand, we have to explicitly check that 3911 // it's not one of the other variants. Ugh. 3912 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 3913 Operands.size() == 6 && 3914 static_cast<ARMOperand*>(Operands[3])->isReg() && 3915 static_cast<ARMOperand*>(Operands[4])->isReg() && 3916 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3917 // Nest conditions rather than one big 'if' statement for readability. 3918 // 3919 // If either register is a high reg, it's either one of the SP 3920 // variants (handled above) or a 32-bit encoding, so we just 3921 // check against T3. 3922 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3923 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 3924 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 3925 return false; 3926 // If both registers are low, we're in an IT block, and the immediate is 3927 // in range, we should use encoding T1 instead, which has a cc_out. 
3928 if (inITBlock() && 3929 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 3930 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 3931 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 3932 return false; 3933 3934 // Otherwise, we use encoding T4, which does not have a cc_out 3935 // operand. 3936 return true; 3937 } 3938 3939 // The thumb2 multiply instruction doesn't have a CCOut register, so 3940 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 3941 // use the 16-bit encoding or not. 3942 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 3943 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3944 static_cast<ARMOperand*>(Operands[3])->isReg() && 3945 static_cast<ARMOperand*>(Operands[4])->isReg() && 3946 static_cast<ARMOperand*>(Operands[5])->isReg() && 3947 // If the registers aren't low regs, the destination reg isn't the 3948 // same as one of the source regs, or the cc_out operand is zero 3949 // outside of an IT block, we have to use the 32-bit encoding, so 3950 // remove the cc_out operand. 3951 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3952 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 3953 !inITBlock() || 3954 (static_cast<ARMOperand*>(Operands[3])->getReg() != 3955 static_cast<ARMOperand*>(Operands[5])->getReg() && 3956 static_cast<ARMOperand*>(Operands[3])->getReg() != 3957 static_cast<ARMOperand*>(Operands[4])->getReg()))) 3958 return true; 3959 3960 3961 3962 // Register-register 'add/sub' for thumb does not have a cc_out operand 3963 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 3964 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 3965 // right, this will result in better diagnostics (which operand is off) 3966 // anyway. 
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

/// Parse an arm instruction mnemonic followed by its operands.
///
/// Splits the full mnemonic into base mnemonic, condition code, carry-set
/// flag, CPS imod, and IT mask; pushes synthesized CCOut/CondCode operands
/// for the matcher; parses the comma-separated operand list; then performs
/// several post-processing fixups on the operand vector.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask MSB-first: start with the terminating '1' bit and shift
    // it right once per condition letter, OR-ing in a high bit for 't'.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                       ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // For now, we're only parsing Thumb1 (for the most part), so
    // just ignore ".n" qualifiers. We'll use them to restrict
    // matching when we do Thumb2.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
4188 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4189 unsigned HiReg, bool &containsReg) { 4190 containsReg = false; 4191 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4192 unsigned OpReg = Inst.getOperand(i).getReg(); 4193 if (OpReg == Reg) 4194 containsReg = true; 4195 // Anything other than a low register isn't legal here. 4196 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4197 return true; 4198 } 4199 return false; 4200 } 4201 4202 // Check if the specified regisgter is in the register list of the inst, 4203 // starting at the indicated operand number. 4204 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4205 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4206 unsigned OpReg = Inst.getOperand(i).getReg(); 4207 if (OpReg == Reg) 4208 return true; 4209 } 4210 return false; 4211 } 4212 4213 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4214 // the ARMInsts array) instead. Getting that here requires awkward 4215 // API changes, though. Better way? 4216 namespace llvm { 4217 extern MCInstrDesc ARMInsts[]; 4218 } 4219 static MCInstrDesc &getInstDesc(unsigned Opcode) { 4220 return ARMInsts[Opcode]; 4221 } 4222 4223 // FIXME: We would really like to be able to tablegen'erate this. 4224 bool ARMAsmParser:: 4225 validateInstruction(MCInst &Inst, 4226 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4227 MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4228 SMLoc Loc = Operands[0]->getStartLoc(); 4229 // Check the IT block state first. 4230 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4231 // being allowed in IT blocks, but not being predicable. It just always 4232 // executes. 
4233 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4234 unsigned bit = 1; 4235 if (ITState.FirstCond) 4236 ITState.FirstCond = false; 4237 else 4238 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4239 // The instruction must be predicable. 4240 if (!MCID.isPredicable()) 4241 return Error(Loc, "instructions in IT block must be predicable"); 4242 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4243 unsigned ITCond = bit ? ITState.Cond : 4244 ARMCC::getOppositeCondition(ITState.Cond); 4245 if (Cond != ITCond) { 4246 // Find the condition code Operand to get its SMLoc information. 4247 SMLoc CondLoc; 4248 for (unsigned i = 1; i < Operands.size(); ++i) 4249 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4250 CondLoc = Operands[i]->getStartLoc(); 4251 return Error(CondLoc, "incorrect condition in IT block; got '" + 4252 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4253 "', but expected '" + 4254 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4255 } 4256 // Check for non-'al' condition codes outside of the IT block. 4257 } else if (isThumbTwo() && MCID.isPredicable() && 4258 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4259 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4260 Inst.getOpcode() != ARM::t2B) 4261 return Error(Loc, "predicated instructions must be in IT block"); 4262 4263 switch (Inst.getOpcode()) { 4264 case ARM::LDRD: 4265 case ARM::LDRD_PRE: 4266 case ARM::LDRD_POST: 4267 case ARM::LDREXD: { 4268 // Rt2 must be Rt + 1. 4269 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4270 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4271 if (Rt2 != Rt + 1) 4272 return Error(Operands[3]->getStartLoc(), 4273 "destination operands must be sequential"); 4274 return false; 4275 } 4276 case ARM::STRD: { 4277 // Rt2 must be Rt + 1. 
4278 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4279 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4280 if (Rt2 != Rt + 1) 4281 return Error(Operands[3]->getStartLoc(), 4282 "source operands must be sequential"); 4283 return false; 4284 } 4285 case ARM::STRD_PRE: 4286 case ARM::STRD_POST: 4287 case ARM::STREXD: { 4288 // Rt2 must be Rt + 1. 4289 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4290 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4291 if (Rt2 != Rt + 1) 4292 return Error(Operands[3]->getStartLoc(), 4293 "source operands must be sequential"); 4294 return false; 4295 } 4296 case ARM::SBFX: 4297 case ARM::UBFX: { 4298 // width must be in range [1, 32-lsb] 4299 unsigned lsb = Inst.getOperand(2).getImm(); 4300 unsigned widthm1 = Inst.getOperand(3).getImm(); 4301 if (widthm1 >= 32 - lsb) 4302 return Error(Operands[5]->getStartLoc(), 4303 "bitfield width must be in range [1,32-lsb]"); 4304 return false; 4305 } 4306 case ARM::tLDMIA: { 4307 // If we're parsing Thumb2, the .w variant is available and handles 4308 // most cases that are normally illegal for a Thumb1 LDM 4309 // instruction. We'll make the transformation in processInstruction() 4310 // if necessary. 4311 // 4312 // Thumb LDM instructions are writeback iff the base register is not 4313 // in the register list. 4314 unsigned Rn = Inst.getOperand(0).getReg(); 4315 bool hasWritebackToken = 4316 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4317 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4318 bool listContainsBase; 4319 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4320 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4321 "registers must be in range r0-r7"); 4322 // If we should have writeback, then there should be a '!' token. 
4323 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4324 return Error(Operands[2]->getStartLoc(), 4325 "writeback operator '!' expected"); 4326 // If we should not have writeback, there must not be a '!'. This is 4327 // true even for the 32-bit wide encodings. 4328 if (listContainsBase && hasWritebackToken) 4329 return Error(Operands[3]->getStartLoc(), 4330 "writeback operator '!' not allowed when base register " 4331 "in register list"); 4332 4333 break; 4334 } 4335 case ARM::t2LDMIA_UPD: { 4336 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4337 return Error(Operands[4]->getStartLoc(), 4338 "writeback operator '!' not allowed when base register " 4339 "in register list"); 4340 break; 4341 } 4342 case ARM::tPOP: { 4343 bool listContainsBase; 4344 if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase)) 4345 return Error(Operands[2]->getStartLoc(), 4346 "registers must be in range r0-r7 or pc"); 4347 break; 4348 } 4349 case ARM::tPUSH: { 4350 bool listContainsBase; 4351 if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase)) 4352 return Error(Operands[2]->getStartLoc(), 4353 "registers must be in range r0-r7 or lr"); 4354 break; 4355 } 4356 case ARM::tSTMIA_UPD: { 4357 bool listContainsBase; 4358 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4359 return Error(Operands[4]->getStartLoc(), 4360 "registers must be in range r0-r7"); 4361 break; 4362 } 4363 } 4364 4365 return false; 4366 } 4367 4368 void ARMAsmParser:: 4369 processInstruction(MCInst &Inst, 4370 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4371 switch (Inst.getOpcode()) { 4372 case ARM::LDMIA_UPD: 4373 // If this is a load of a single register via a 'pop', then we should use 4374 // a post-indexed LDR instruction instead, per the ARM ARM. 
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4)); // pop increments SP by 4
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4)); // push decrements SP by 4
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tADDi3);
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tSUBi3);
    break;
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::tBcc);
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock())
      Inst.setOpcode(ARM::t2B);
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL)
      Inst.setOpcode(ARM::tB);
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
    }
    break;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
4503 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4504 isARMLowRegister(Inst.getOperand(1).getReg()) && 4505 Inst.getOperand(2).getImm() == ARMCC::AL && 4506 Inst.getOperand(4).getReg() == ARM::CPSR && 4507 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4508 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4509 // The operands aren't the same for tMOV[S]r... (no cc_out) 4510 MCInst TmpInst; 4511 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4512 TmpInst.addOperand(Inst.getOperand(0)); 4513 TmpInst.addOperand(Inst.getOperand(1)); 4514 TmpInst.addOperand(Inst.getOperand(2)); 4515 TmpInst.addOperand(Inst.getOperand(3)); 4516 Inst = TmpInst; 4517 } 4518 break; 4519 } 4520 case ARM::t2SXTH: 4521 case ARM::t2SXTB: 4522 case ARM::t2UXTH: 4523 case ARM::t2UXTB: { 4524 // If we can use the 16-bit encoding and the user didn't explicitly 4525 // request the 32-bit variant, transform it here. 4526 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4527 isARMLowRegister(Inst.getOperand(1).getReg()) && 4528 Inst.getOperand(2).getImm() == 0 && 4529 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4530 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4531 unsigned NewOpc; 4532 switch (Inst.getOpcode()) { 4533 default: llvm_unreachable("Illegal opcode!"); 4534 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4535 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4536 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4537 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4538 } 4539 // The operands aren't the same for thumb1 (no rotate operand). 
4540 MCInst TmpInst; 4541 TmpInst.setOpcode(NewOpc); 4542 TmpInst.addOperand(Inst.getOperand(0)); 4543 TmpInst.addOperand(Inst.getOperand(1)); 4544 TmpInst.addOperand(Inst.getOperand(3)); 4545 TmpInst.addOperand(Inst.getOperand(4)); 4546 Inst = TmpInst; 4547 } 4548 break; 4549 } 4550 case ARM::t2IT: { 4551 // The mask bits for all but the first condition are represented as 4552 // the low bit of the condition code value implies 't'. We currently 4553 // always have 1 implies 't', so XOR toggle the bits if the low bit 4554 // of the condition code is zero. The encoding also expects the low 4555 // bit of the condition to be encoded as bit 4 of the mask operand, 4556 // so mask that in if needed 4557 MCOperand &MO = Inst.getOperand(1); 4558 unsigned Mask = MO.getImm(); 4559 unsigned OrigMask = Mask; 4560 unsigned TZ = CountTrailingZeros_32(Mask); 4561 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4562 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4563 for (unsigned i = 3; i != TZ; --i) 4564 Mask ^= 1 << i; 4565 } else 4566 Mask |= 0x10; 4567 MO.setImm(Mask); 4568 4569 // Set up the IT block state according to the IT instruction we just 4570 // matched. 4571 assert(!inITBlock() && "nested IT blocks?!"); 4572 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4573 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4574 ITState.CurPosition = 0; 4575 ITState.FirstCond = true; 4576 break; 4577 } 4578 } 4579 } 4580 4581 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4582 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4583 // suffix depending on whether they're in an IT block or not. 
4584 unsigned Opc = Inst.getOpcode(); 4585 MCInstrDesc &MCID = getInstDesc(Opc); 4586 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4587 assert(MCID.hasOptionalDef() && 4588 "optionally flag setting instruction missing optional def operand"); 4589 assert(MCID.NumOperands == Inst.getNumOperands() && 4590 "operand count mismatch!"); 4591 // Find the optional-def operand (cc_out). 4592 unsigned OpNo; 4593 for (OpNo = 0; 4594 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4595 ++OpNo) 4596 ; 4597 // If we're parsing Thumb1, reject it completely. 4598 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4599 return Match_MnemonicFail; 4600 // If we're parsing Thumb2, which form is legal depends on whether we're 4601 // in an IT block. 4602 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4603 !inITBlock()) 4604 return Match_RequiresITBlock; 4605 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4606 inITBlock()) 4607 return Match_RequiresNotITBlock; 4608 } 4609 // Some high-register supporting Thumb1 encodings only allow both registers 4610 // to be from r0-r7 when in Thumb2. 4611 else if (Opc == ARM::tADDhirr && isThumbOne() && 4612 isARMLowRegister(Inst.getOperand(1).getReg()) && 4613 isARMLowRegister(Inst.getOperand(2).getReg())) 4614 return Match_RequiresThumb2; 4615 // Others only require ARMv6 or later. 
4616 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4617 isARMLowRegister(Inst.getOperand(0).getReg()) && 4618 isARMLowRegister(Inst.getOperand(1).getReg())) 4619 return Match_RequiresV6; 4620 return Match_Success; 4621 } 4622 4623 bool ARMAsmParser:: 4624 MatchAndEmitInstruction(SMLoc IDLoc, 4625 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4626 MCStreamer &Out) { 4627 MCInst Inst; 4628 unsigned ErrorInfo; 4629 unsigned MatchResult; 4630 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4631 switch (MatchResult) { 4632 default: break; 4633 case Match_Success: 4634 // Context sensitive operand constraints aren't handled by the matcher, 4635 // so check them here. 4636 if (validateInstruction(Inst, Operands)) { 4637 // Still progress the IT block, otherwise one wrong condition causes 4638 // nasty cascading errors. 4639 forwardITPosition(); 4640 return true; 4641 } 4642 4643 // Some instructions need post-processing to, for example, tweak which 4644 // encoding is selected. 4645 processInstruction(Inst, Operands); 4646 4647 // Only move forward at the very end so that everything in validate 4648 // and process gets a consistent answer about whether we're in an IT 4649 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ~0U means the matcher couldn't attribute the failure to one operand.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  // Returning true lets the generic parser handle (or reject) the directive.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has the function name after the .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getString();
    Parser.Lex(); // Consume the identifier token.
4757 } 4758 4759 if (getLexer().isNot(AsmToken::EndOfStatement)) 4760 return Error(L, "unexpected token in directive"); 4761 Parser.Lex(); 4762 4763 // FIXME: assuming function name will be the line following .thumb_func 4764 if (!isMachO) { 4765 Name = Parser.getTok().getString(); 4766 } 4767 4768 // Mark symbol as a thumb symbol. 4769 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4770 getParser().getStreamer().EmitThumbFunc(Func); 4771 return false; 4772 } 4773 4774 /// parseDirectiveSyntax 4775 /// ::= .syntax unified | divided 4776 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4777 const AsmToken &Tok = Parser.getTok(); 4778 if (Tok.isNot(AsmToken::Identifier)) 4779 return Error(L, "unexpected token in .syntax directive"); 4780 StringRef Mode = Tok.getString(); 4781 if (Mode == "unified" || Mode == "UNIFIED") 4782 Parser.Lex(); 4783 else if (Mode == "divided" || Mode == "DIVIDED") 4784 return Error(L, "'.syntax divided' arm asssembly not supported"); 4785 else 4786 return Error(L, "unrecognized syntax mode in .syntax directive"); 4787 4788 if (getLexer().isNot(AsmToken::EndOfStatement)) 4789 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4790 Parser.Lex(); 4791 4792 // TODO tell the MC streamer the mode 4793 // getParser().getStreamer().Emit???(); 4794 return false; 4795 } 4796 4797 /// parseDirectiveCode 4798 /// ::= .code 16 | 32 4799 bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4800 const AsmToken &Tok = Parser.getTok(); 4801 if (Tok.isNot(AsmToken::Integer)) 4802 return Error(L, "unexpected token in .code directive"); 4803 int64_t Val = Parser.getTok().getIntVal(); 4804 if (Val == 16) 4805 Parser.Lex(); 4806 else if (Val == 32) 4807 Parser.Lex(); 4808 else 4809 return Error(L, "invalid operand to .code directive"); 4810 4811 if (getLexer().isNot(AsmToken::EndOfStatement)) 4812 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4813 Parser.Lex(); 4814 4815 if (Val == 16) { 4816 if 
(!isThumb()) 4817 SwitchMode(); 4818 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4819 } else { 4820 if (isThumb()) 4821 SwitchMode(); 4822 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4823 } 4824 4825 return false; 4826 } 4827 4828 extern "C" void LLVMInitializeARMAsmLexer(); 4829 4830 /// Force static initialization. 4831 extern "C" void LLVMInitializeARMAsmParser() { 4832 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4833 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4834 LLVMInitializeARMAsmLexer(); 4835 } 4836 4837 #define GET_REGISTER_MATCHER 4838 #define GET_MATCHER_IMPLEMENTATION 4839 #include "ARMGenAsmMatcher.inc" 4840