//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. Implements
/// the MCTargetAsmParser interface: it lexes operands into ARMOperand
/// instances, drives the tablegen'd matcher (ARMGenAsmMatcher.inc), and
/// tracks Thumb2 IT-block state across instructions.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;   // Subtarget feature bits (ARM vs Thumb, V6/V7, ...).
  MCAsmParser &Parser;    // The generic parser we are extending.

  // State of the currently-active Thumb2 IT (If-Then) block, carried between
  // ParseInstruction/MatchAndEmitInstruction calls so that predicated
  // instructions inside the block can be validated.
  struct {
    ARMCC::CondCodes Cond;  // Condition for IT block.
    unsigned Mask:4;        // Condition mask for instructions.
                            // Starting at first 1 (from lsb).
                            //   '1'  condition as indicated in IT.
                            //   '0'  inverse of condition (else).
                            // Count of instructions in IT block is
                            // 4 - trailingzeroes(mask)

    bool FirstCond;         // Explicit flag for when we're parsing the
                            // First instruction in the IT block. It's
                            // implied in the mask, so needs special
                            // handling.

    unsigned CurPosition;   // Current position in parsing of IT
                            // block. In range [0,3]. Initialized
                            // according to count of instructions in block.
                            // ~0U if no active IT block.
  } ITState;
  // True while we are still inside an IT block (CurPosition is live).
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostic helpers forwarding to the generic parser (Error returns true
  // so callers can `return Error(...)`).
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Hand-written operand parsers; the bool-returning ones follow the
  // convention "true on error". Definitions are elsewhere in this file.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Assembler directive handlers (.word, .thumb, .thumb_func, .code, .syntax).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the base mnemonic plus any
  // predication code, S-suffix carry-setting flag, processor IMod, and
  // IT-mask suffix.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature predicates, derived from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode (used by .code/.thumb directives) and
  // recompute the matcher's available-features mask.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand-parsing hooks invoked by the auto-generated matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  // PKH shift immediates: lsl is [0,31], asr is [1,32] per the PKHBT/PKHTB
  // encodings; both funnel into parsePKHImm.
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods -- custom operand-to-MCInst conversions
  // referenced by the tablegen'd matcher for addressing-mode re-ordering
  // (e.g. writeback operands).
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match semantic validation and per-instruction fixups.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  // Decide whether the optional CC-out (S flag) operand should be dropped
  // for this mnemonic/operand combination.
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match failure codes returned by
  // checkTargetMatchPredicate, continuing the generic matcher's enum.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below: exactly one alternative is active,
  // selected by Kind.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  // Source range of the operand's tokens, for diagnostics.
  SMLoc StartLoc, EndLoc;
  // Register lists can't live in the union (non-trivial type), so they sit
  // alongside it; valid only for the k_*RegisterList kinds.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;     // Predication condition (k_CondCode).
    } CC;

    struct {
      unsigned Val;             // Coprocessor number/register (k_CoprocNum/Reg).
    } Cop;

    struct {
      unsigned Val;             // Coprocessor option value (k_CoprocOption).
    } CoprocOption;

    struct {
      unsigned Mask:4;          // IT instruction condition mask (k_ITCondMask).
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;      // Memory barrier option (k_MemBarrierOpt).
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;     // CPS interrupt flags (k_ProcIFlags).
    } IFlags;

    struct {
      unsigned Val;             // MSR mask value (k_MSRMask).
    } MMask;

    struct {
      const char *Data;         // Raw token text (k_Token); not owned.
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;          // Register number (k_Register / k_CCOut).
    } Reg;

    struct {
      unsigned Val;             // NEON lane index (k_VectorIndex).
    } VectorIndex;

    struct {
      const MCExpr *Val;        // Immediate expression (k_Immediate).
    } Imm;

    struct {
      unsigned Val;             // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    // Post-indexed register offset, e.g. "[r0], r1, lsl #2".
    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    // Shift applied as an operand modifier, e.g. "asr #5".
    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    // Register shifted by a register, e.g. "r0, lsl r1".
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    // Register shifted by an immediate, e.g. "r0, lsl #3".
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    // Rotate amount for SXTB/UXTB-style operands (stored unencoded here).
    struct {
      unsigned Imm;
    } RotImm;
    // BFC/BFI bitfield descriptor.
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  // Kind-only constructor; the factory Create* methods fill in the payload.
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor must switch on Kind to copy only the active union
  // member (copying the raw union bytes would be UB for some members).
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors for the union payload. Each asserts that the
  // corresponding Kind is active.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Predicates queried by the auto-generated matcher to classify this
  // operand against each instruction's operand constraints. The isImm*
  // range checks return false for non-constant expressions (which need
  // fixups and are handled elsewhere) unless noted otherwise.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  // Multiple of 4 in [-1020, 1020].
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  // Multiple of 4 in [0, 1020].
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  // Multiple of 4 in [0, 508].
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  // Thumb shift-right amount: [1, 32].
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  // Valid ARM modified-immediate (8-bit value rotated by an even amount).
  bool isARMSOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  // Valid Thumb2 modified-immediate.
  bool isT2SOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  // SETEND takes only 0 (LE) or 1 (BE).
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the sentinel for #-0.
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255]. INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 0;
  }
  bool isMemUImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095]. INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON lane-index predicates: valid lane count depends on element size.
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }


  // The add*Operands methods below append this operand's payload to an
  // MCInst in the form the encoder expects; each is called by the
  // auto-generated matcher with the exact operand count N it will consume.

  // Append Expr as an MCInst operand: constants become immediates (a null
  // Expr becomes immediate 0); anything else stays a symbolic expression
  // and will be resolved by a fixup.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  // Predication is two operands: the condition code immediate plus CPSR as
  // the register read (0, i.e. no register, when the condition is AL).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  // Optional S-flag operand: CPSR if the flag is set, register 0 otherwise.
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register shifted by register: src reg, shift reg, and the shift opcode
  // packed together with the (unused here) immediate via getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: src reg plus the packed shift-op/amount.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Shifter immediate: bit 5 selects ASR vs LSL, low bits hold the amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  // A register list is emitted as one register operand per member.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
1054 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1055 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1056 } 1057 1058 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1059 assert(N == 1 && "Invalid number of operands!"); 1060 addExpr(Inst, getImm()); 1061 } 1062 1063 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1064 assert(N == 1 && "Invalid number of operands!"); 1065 addExpr(Inst, getImm()); 1066 } 1067 1068 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1069 assert(N == 1 && "Invalid number of operands!"); 1070 addExpr(Inst, getImm()); 1071 } 1072 1073 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1074 assert(N == 1 && "Invalid number of operands!"); 1075 addExpr(Inst, getImm()); 1076 } 1077 1078 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1079 assert(N == 1 && "Invalid number of operands!"); 1080 // The constant encodes as the immediate-1, and we store in the instruction 1081 // the bits as encoded, so subtract off one here. 1082 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1083 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1084 } 1085 1086 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1087 assert(N == 1 && "Invalid number of operands!"); 1088 // The constant encodes as the immediate-1, and we store in the instruction 1089 // the bits as encoded, so subtract off one here. 
1090 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1091 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1092 } 1093 1094 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1095 assert(N == 1 && "Invalid number of operands!"); 1096 addExpr(Inst, getImm()); 1097 } 1098 1099 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1100 assert(N == 1 && "Invalid number of operands!"); 1101 addExpr(Inst, getImm()); 1102 } 1103 1104 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1105 assert(N == 1 && "Invalid number of operands!"); 1106 addExpr(Inst, getImm()); 1107 } 1108 1109 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1110 assert(N == 1 && "Invalid number of operands!"); 1111 // The constant encodes as the immediate, except for 32, which encodes as 1112 // zero. 1113 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1114 unsigned Imm = CE->getValue(); 1115 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1116 } 1117 1118 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1119 assert(N == 1 && "Invalid number of operands!"); 1120 addExpr(Inst, getImm()); 1121 } 1122 1123 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1124 assert(N == 1 && "Invalid number of operands!"); 1125 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1126 // the instruction as well. 1127 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1128 int Val = CE->getValue(); 1129 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 
0 : Val)); 1130 } 1131 1132 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1133 assert(N == 1 && "Invalid number of operands!"); 1134 addExpr(Inst, getImm()); 1135 } 1136 1137 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1138 assert(N == 1 && "Invalid number of operands!"); 1139 addExpr(Inst, getImm()); 1140 } 1141 1142 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1143 assert(N == 1 && "Invalid number of operands!"); 1144 addExpr(Inst, getImm()); 1145 } 1146 1147 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1148 assert(N == 1 && "Invalid number of operands!"); 1149 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1150 } 1151 1152 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1153 assert(N == 1 && "Invalid number of operands!"); 1154 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1155 } 1156 1157 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1158 assert(N == 2 && "Invalid number of operands!"); 1159 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1160 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1161 } 1162 1163 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1164 assert(N == 3 && "Invalid number of operands!"); 1165 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1166 if (!Memory.OffsetRegNum) { 1167 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1168 // Special case for #-0 1169 if (Val == INT32_MIN) Val = 0; 1170 if (Val < 0) Val = -Val; 1171 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1172 } else { 1173 // For register offset, we encode the shift type and negation flag 1174 // here. 1175 Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1176 Memory.ShiftImm, Memory.ShiftType); 1177 } 1178 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1179 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1180 Inst.addOperand(MCOperand::CreateImm(Val)); 1181 } 1182 1183 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1184 assert(N == 2 && "Invalid number of operands!"); 1185 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1186 assert(CE && "non-constant AM2OffsetImm operand!"); 1187 int32_t Val = CE->getValue(); 1188 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1189 // Special case for #-0 1190 if (Val == INT32_MIN) Val = 0; 1191 if (Val < 0) Val = -Val; 1192 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1193 Inst.addOperand(MCOperand::CreateReg(0)); 1194 Inst.addOperand(MCOperand::CreateImm(Val)); 1195 } 1196 1197 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1198 assert(N == 3 && "Invalid number of operands!"); 1199 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1200 if (!Memory.OffsetRegNum) { 1201 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1202 // Special case for #-0 1203 if (Val == INT32_MIN) Val = 0; 1204 if (Val < 0) Val = -Val; 1205 Val = ARM_AM::getAM3Opc(AddSub, Val); 1206 } else { 1207 // For register offset, we encode the shift type and negation flag 1208 // here. 1209 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1210 } 1211 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1212 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1213 Inst.addOperand(MCOperand::CreateImm(Val)); 1214 } 1215 1216 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1217 assert(N == 2 && "Invalid number of operands!"); 1218 if (Kind == k_PostIndexRegister) { 1219 int32_t Val = 1220 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? 
ARM_AM::add : ARM_AM::sub, 0); 1221 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1222 Inst.addOperand(MCOperand::CreateImm(Val)); 1223 return; 1224 } 1225 1226 // Constant offset. 1227 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1228 int32_t Val = CE->getValue(); 1229 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1230 // Special case for #-0 1231 if (Val == INT32_MIN) Val = 0; 1232 if (Val < 0) Val = -Val; 1233 Val = ARM_AM::getAM3Opc(AddSub, Val); 1234 Inst.addOperand(MCOperand::CreateReg(0)); 1235 Inst.addOperand(MCOperand::CreateImm(Val)); 1236 } 1237 1238 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1239 assert(N == 2 && "Invalid number of operands!"); 1240 // The lower two bits are always zero and as such are not encoded. 1241 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1242 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1243 // Special case for #-0 1244 if (Val == INT32_MIN) Val = 0; 1245 if (Val < 0) Val = -Val; 1246 Val = ARM_AM::getAM5Opc(AddSub, Val); 1247 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1248 Inst.addOperand(MCOperand::CreateImm(Val)); 1249 } 1250 1251 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1252 assert(N == 2 && "Invalid number of operands!"); 1253 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1254 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1255 Inst.addOperand(MCOperand::CreateImm(Val)); 1256 } 1257 1258 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1259 assert(N == 2 && "Invalid number of operands!"); 1260 // The lower two bits are always zero and as such are not encoded. 1261 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1262 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1263 Inst.addOperand(MCOperand::CreateImm(Val)); 1264 } 1265 1266 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1267 assert(N == 2 && "Invalid number of operands!"); 1268 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1269 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1270 Inst.addOperand(MCOperand::CreateImm(Val)); 1271 } 1272 1273 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1274 addMemImm8OffsetOperands(Inst, N); 1275 } 1276 1277 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1278 addMemImm8OffsetOperands(Inst, N); 1279 } 1280 1281 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1282 assert(N == 2 && "Invalid number of operands!"); 1283 // If this is an immediate, it's a label reference. 1284 if (Kind == k_Immediate) { 1285 addExpr(Inst, getImm()); 1286 Inst.addOperand(MCOperand::CreateImm(0)); 1287 return; 1288 } 1289 1290 // Otherwise, it's a normal memory reg+offset. 1291 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1292 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1293 Inst.addOperand(MCOperand::CreateImm(Val)); 1294 } 1295 1296 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1297 assert(N == 2 && "Invalid number of operands!"); 1298 // If this is an immediate, it's a label reference. 1299 if (Kind == k_Immediate) { 1300 addExpr(Inst, getImm()); 1301 Inst.addOperand(MCOperand::CreateImm(0)); 1302 return; 1303 } 1304 1305 // Otherwise, it's a normal memory reg+offset. 1306 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1307 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1308 Inst.addOperand(MCOperand::CreateImm(Val)); 1309 } 1310 1311 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1312 assert(N == 2 && "Invalid number of operands!"); 1313 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1314 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1315 } 1316 1317 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1318 assert(N == 2 && "Invalid number of operands!"); 1319 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1320 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1321 } 1322 1323 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1324 assert(N == 3 && "Invalid number of operands!"); 1325 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1326 Memory.ShiftImm, Memory.ShiftType); 1327 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1328 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1329 Inst.addOperand(MCOperand::CreateImm(Val)); 1330 } 1331 1332 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1333 assert(N == 3 && "Invalid number of operands!"); 1334 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1335 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1336 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1337 } 1338 1339 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1340 assert(N == 2 && "Invalid number of operands!"); 1341 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1342 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1343 } 1344 1345 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1346 assert(N == 2 && "Invalid number of operands!"); 1347 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1348 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1349 Inst.addOperand(MCOperand::CreateImm(Val)); 1350 } 1351 1352 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1353 assert(N == 2 && "Invalid number of operands!"); 1354 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1355 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1356 Inst.addOperand(MCOperand::CreateImm(Val)); 1357 } 1358 1359 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1360 assert(N == 2 && "Invalid number of operands!"); 1361 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1362 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1363 Inst.addOperand(MCOperand::CreateImm(Val)); 1364 } 1365 1366 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1367 assert(N == 2 && "Invalid number of operands!"); 1368 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1369 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1370 Inst.addOperand(MCOperand::CreateImm(Val)); 1371 } 1372 1373 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1374 assert(N == 1 && "Invalid number of operands!"); 1375 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1376 assert(CE && "non-constant post-idx-imm8 operand!"); 1377 int Imm = CE->getValue(); 1378 bool isAdd = Imm >= 0; 1379 if (Imm == INT32_MIN) Imm = 0; 1380 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1381 Inst.addOperand(MCOperand::CreateImm(Imm)); 1382 } 1383 1384 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1385 assert(N == 1 && "Invalid number of operands!"); 1386 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1387 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1388 int Imm = CE->getValue(); 1389 bool isAdd = Imm >= 0; 1390 if (Imm == INT32_MIN) Imm = 0; 1391 // Immediate is scaled by 4. 1392 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1393 Inst.addOperand(MCOperand::CreateImm(Imm)); 1394 } 1395 1396 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1397 assert(N == 2 && "Invalid number of operands!"); 1398 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1399 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1400 } 1401 1402 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1403 assert(N == 2 && "Invalid number of operands!"); 1404 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1405 // The sign, shift type, and shift amount are encoded in a single operand 1406 // using the AM2 encoding helpers. 1407 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1408 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1409 PostIdxReg.ShiftTy); 1410 Inst.addOperand(MCOperand::CreateImm(Imm)); 1411 } 1412 1413 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1414 assert(N == 1 && "Invalid number of operands!"); 1415 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1416 } 1417 1418 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1419 assert(N == 1 && "Invalid number of operands!"); 1420 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1421 } 1422 1423 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1424 assert(N == 1 && "Invalid number of operands!"); 1425 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1426 } 1427 1428 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1429 assert(N == 1 && "Invalid number of operands!"); 1430 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1431 } 1432 1433 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1434 assert(N == 1 && "Invalid number of operands!"); 1435 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1436 } 1437 1438 virtual void print(raw_ostream &OS) const; 1439 1440 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1441 
ARMOperand *Op = new ARMOperand(k_ITCondMask); 1442 Op->ITMask.Mask = Mask; 1443 Op->StartLoc = S; 1444 Op->EndLoc = S; 1445 return Op; 1446 } 1447 1448 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1449 ARMOperand *Op = new ARMOperand(k_CondCode); 1450 Op->CC.Val = CC; 1451 Op->StartLoc = S; 1452 Op->EndLoc = S; 1453 return Op; 1454 } 1455 1456 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1457 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1458 Op->Cop.Val = CopVal; 1459 Op->StartLoc = S; 1460 Op->EndLoc = S; 1461 return Op; 1462 } 1463 1464 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1465 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1466 Op->Cop.Val = CopVal; 1467 Op->StartLoc = S; 1468 Op->EndLoc = S; 1469 return Op; 1470 } 1471 1472 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1473 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1474 Op->Cop.Val = Val; 1475 Op->StartLoc = S; 1476 Op->EndLoc = E; 1477 return Op; 1478 } 1479 1480 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1481 ARMOperand *Op = new ARMOperand(k_CCOut); 1482 Op->Reg.RegNum = RegNum; 1483 Op->StartLoc = S; 1484 Op->EndLoc = S; 1485 return Op; 1486 } 1487 1488 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1489 ARMOperand *Op = new ARMOperand(k_Token); 1490 Op->Tok.Data = Str.data(); 1491 Op->Tok.Length = Str.size(); 1492 Op->StartLoc = S; 1493 Op->EndLoc = S; 1494 return Op; 1495 } 1496 1497 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1498 ARMOperand *Op = new ARMOperand(k_Register); 1499 Op->Reg.RegNum = RegNum; 1500 Op->StartLoc = S; 1501 Op->EndLoc = E; 1502 return Op; 1503 } 1504 1505 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, 1506 unsigned SrcReg, 1507 unsigned ShiftReg, 1508 unsigned ShiftImm, 1509 SMLoc S, SMLoc E) { 1510 ARMOperand *Op = new ARMOperand(k_ShiftedRegister); 1511 Op->RegShiftedReg.ShiftTy = ShTy; 1512 Op->RegShiftedReg.SrcReg 
= SrcReg; 1513 Op->RegShiftedReg.ShiftReg = ShiftReg; 1514 Op->RegShiftedReg.ShiftImm = ShiftImm; 1515 Op->StartLoc = S; 1516 Op->EndLoc = E; 1517 return Op; 1518 } 1519 1520 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, 1521 unsigned SrcReg, 1522 unsigned ShiftImm, 1523 SMLoc S, SMLoc E) { 1524 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate); 1525 Op->RegShiftedImm.ShiftTy = ShTy; 1526 Op->RegShiftedImm.SrcReg = SrcReg; 1527 Op->RegShiftedImm.ShiftImm = ShiftImm; 1528 Op->StartLoc = S; 1529 Op->EndLoc = E; 1530 return Op; 1531 } 1532 1533 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm, 1534 SMLoc S, SMLoc E) { 1535 ARMOperand *Op = new ARMOperand(k_ShifterImmediate); 1536 Op->ShifterImm.isASR = isASR; 1537 Op->ShifterImm.Imm = Imm; 1538 Op->StartLoc = S; 1539 Op->EndLoc = E; 1540 return Op; 1541 } 1542 1543 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) { 1544 ARMOperand *Op = new ARMOperand(k_RotateImmediate); 1545 Op->RotImm.Imm = Imm; 1546 Op->StartLoc = S; 1547 Op->EndLoc = E; 1548 return Op; 1549 } 1550 1551 static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width, 1552 SMLoc S, SMLoc E) { 1553 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor); 1554 Op->Bitfield.LSB = LSB; 1555 Op->Bitfield.Width = Width; 1556 Op->StartLoc = S; 1557 Op->EndLoc = E; 1558 return Op; 1559 } 1560 1561 static ARMOperand * 1562 CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs, 1563 SMLoc StartLoc, SMLoc EndLoc) { 1564 KindTy Kind = k_RegisterList; 1565 1566 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first)) 1567 Kind = k_DPRRegisterList; 1568 else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. 
1569 contains(Regs.front().first)) 1570 Kind = k_SPRRegisterList; 1571 1572 ARMOperand *Op = new ARMOperand(Kind); 1573 for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator 1574 I = Regs.begin(), E = Regs.end(); I != E; ++I) 1575 Op->Registers.push_back(I->first); 1576 array_pod_sort(Op->Registers.begin(), Op->Registers.end()); 1577 Op->StartLoc = StartLoc; 1578 Op->EndLoc = EndLoc; 1579 return Op; 1580 } 1581 1582 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, 1583 MCContext &Ctx) { 1584 ARMOperand *Op = new ARMOperand(k_VectorIndex); 1585 Op->VectorIndex.Val = Idx; 1586 Op->StartLoc = S; 1587 Op->EndLoc = E; 1588 return Op; 1589 } 1590 1591 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { 1592 ARMOperand *Op = new ARMOperand(k_Immediate); 1593 Op->Imm.Val = Val; 1594 Op->StartLoc = S; 1595 Op->EndLoc = E; 1596 return Op; 1597 } 1598 1599 static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) { 1600 ARMOperand *Op = new ARMOperand(k_FPImmediate); 1601 Op->FPImm.Val = Val; 1602 Op->StartLoc = S; 1603 Op->EndLoc = S; 1604 return Op; 1605 } 1606 1607 static ARMOperand *CreateMem(unsigned BaseRegNum, 1608 const MCConstantExpr *OffsetImm, 1609 unsigned OffsetRegNum, 1610 ARM_AM::ShiftOpc ShiftType, 1611 unsigned ShiftImm, 1612 unsigned Alignment, 1613 bool isNegative, 1614 SMLoc S, SMLoc E) { 1615 ARMOperand *Op = new ARMOperand(k_Memory); 1616 Op->Memory.BaseRegNum = BaseRegNum; 1617 Op->Memory.OffsetImm = OffsetImm; 1618 Op->Memory.OffsetRegNum = OffsetRegNum; 1619 Op->Memory.ShiftType = ShiftType; 1620 Op->Memory.ShiftImm = ShiftImm; 1621 Op->Memory.Alignment = Alignment; 1622 Op->Memory.isNegative = isNegative; 1623 Op->StartLoc = S; 1624 Op->EndLoc = E; 1625 return Op; 1626 } 1627 1628 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd, 1629 ARM_AM::ShiftOpc ShiftTy, 1630 unsigned ShiftImm, 1631 SMLoc S, SMLoc E) { 1632 ARMOperand *Op = new ARMOperand(k_PostIndexRegister); 1633 
Op->PostIdxReg.RegNum = RegNum; 1634 Op->PostIdxReg.isAdd = isAdd; 1635 Op->PostIdxReg.ShiftTy = ShiftTy; 1636 Op->PostIdxReg.ShiftImm = ShiftImm; 1637 Op->StartLoc = S; 1638 Op->EndLoc = E; 1639 return Op; 1640 } 1641 1642 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) { 1643 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt); 1644 Op->MBOpt.Val = Opt; 1645 Op->StartLoc = S; 1646 Op->EndLoc = S; 1647 return Op; 1648 } 1649 1650 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) { 1651 ARMOperand *Op = new ARMOperand(k_ProcIFlags); 1652 Op->IFlags.Val = IFlags; 1653 Op->StartLoc = S; 1654 Op->EndLoc = S; 1655 return Op; 1656 } 1657 1658 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { 1659 ARMOperand *Op = new ARMOperand(k_MSRMask); 1660 Op->MMask.Val = MMask; 1661 Op->StartLoc = S; 1662 Op->EndLoc = S; 1663 return Op; 1664 } 1665 }; 1666 1667 } // end anonymous namespace. 1668 1669 void ARMOperand::print(raw_ostream &OS) const { 1670 switch (Kind) { 1671 case k_FPImmediate: 1672 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1673 << ") >"; 1674 break; 1675 case k_CondCode: 1676 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1677 break; 1678 case k_CCOut: 1679 OS << "<ccout " << getReg() << ">"; 1680 break; 1681 case k_ITCondMask: { 1682 static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)", 1683 "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)", 1684 "(tee)", "(eee)" }; 1685 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1686 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1687 break; 1688 } 1689 case k_CoprocNum: 1690 OS << "<coprocessor number: " << getCoproc() << ">"; 1691 break; 1692 case k_CoprocReg: 1693 OS << "<coprocessor register: " << getCoproc() << ">"; 1694 break; 1695 case k_CoprocOption: 1696 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1697 break; 1698 case k_MSRMask: 1699 OS << "<mask: " << getMSRMask() << ">"; 1700 
break; 1701 case k_Immediate: 1702 getImm()->print(OS); 1703 break; 1704 case k_MemBarrierOpt: 1705 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1706 break; 1707 case k_Memory: 1708 OS << "<memory " 1709 << " base:" << Memory.BaseRegNum; 1710 OS << ">"; 1711 break; 1712 case k_PostIndexRegister: 1713 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") 1714 << PostIdxReg.RegNum; 1715 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1716 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1717 << PostIdxReg.ShiftImm; 1718 OS << ">"; 1719 break; 1720 case k_ProcIFlags: { 1721 OS << "<ARM_PROC::"; 1722 unsigned IFlags = getProcIFlags(); 1723 for (int i=2; i >= 0; --i) 1724 if (IFlags & (1 << i)) 1725 OS << ARM_PROC::IFlagsToString(1 << i); 1726 OS << ">"; 1727 break; 1728 } 1729 case k_Register: 1730 OS << "<register " << getReg() << ">"; 1731 break; 1732 case k_ShifterImmediate: 1733 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1734 << " #" << ShifterImm.Imm << ">"; 1735 break; 1736 case k_ShiftedRegister: 1737 OS << "<so_reg_reg " 1738 << RegShiftedReg.SrcReg 1739 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1740 << ", " << RegShiftedReg.ShiftReg << ", " 1741 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1742 << ">"; 1743 break; 1744 case k_ShiftedImmediate: 1745 OS << "<so_reg_imm " 1746 << RegShiftedImm.SrcReg 1747 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1748 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1749 << ">"; 1750 break; 1751 case k_RotateImmediate: 1752 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1753 break; 1754 case k_BitfieldDescriptor: 1755 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1756 << ", width: " << Bitfield.Width << ">"; 1757 break; 1758 case k_RegisterList: 1759 case k_DPRRegisterList: 1760 case k_SPRRegisterList: { 1761 OS << "<register_list "; 1762 1763 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1764 for 
(SmallVectorImpl<unsigned>::const_iterator 1765 I = RegList.begin(), E = RegList.end(); I != E; ) { 1766 OS << *I; 1767 if (++I < E) OS << ", "; 1768 } 1769 1770 OS << ">"; 1771 break; 1772 } 1773 case k_Token: 1774 OS << "'" << getToken() << "'"; 1775 break; 1776 case k_VectorIndex: 1777 OS << "<vectorindex " << getVectorIndex() << ">"; 1778 break; 1779 } 1780 } 1781 1782 /// @name Auto-generated Match Functions 1783 /// { 1784 1785 static unsigned MatchRegisterName(StringRef Name); 1786 1787 /// } 1788 1789 bool ARMAsmParser::ParseRegister(unsigned &RegNo, 1790 SMLoc &StartLoc, SMLoc &EndLoc) { 1791 RegNo = tryParseRegister(); 1792 1793 return (RegNo == (unsigned)-1); 1794 } 1795 1796 /// Try to parse a register name. The token must be an Identifier when called, 1797 /// and if it is a register name the token is eaten and the register number is 1798 /// returned. Otherwise return -1. 1799 /// 1800 int ARMAsmParser::tryParseRegister() { 1801 const AsmToken &Tok = Parser.getTok(); 1802 if (Tok.isNot(AsmToken::Identifier)) return -1; 1803 1804 // FIXME: Validate register for the current architecture; we have to do 1805 // validation later, so maybe there is no need for this here. 1806 std::string upperCase = Tok.getString().str(); 1807 std::string lowerCase = LowercaseString(upperCase); 1808 unsigned RegNum = MatchRegisterName(lowerCase); 1809 if (!RegNum) { 1810 RegNum = StringSwitch<unsigned>(lowerCase) 1811 .Case("r13", ARM::SP) 1812 .Case("r14", ARM::LR) 1813 .Case("r15", ARM::PC) 1814 .Case("ip", ARM::R12) 1815 .Default(0); 1816 } 1817 if (!RegNum) return -1; 1818 1819 Parser.Lex(); // Eat identifier token. 1820 1821 #if 0 1822 // Also check for an index operand. This is only legal for vector registers, 1823 // but that'll get caught OK in operand matching, so we don't need to 1824 // explicitly filter everything else out here. 
1825 if (Parser.getTok().is(AsmToken::LBrac)) { 1826 SMLoc SIdx = Parser.getTok().getLoc(); 1827 Parser.Lex(); // Eat left bracket token. 1828 1829 const MCExpr *ImmVal; 1830 SMLoc ExprLoc = Parser.getTok().getLoc(); 1831 if (getParser().ParseExpression(ImmVal)) 1832 return MatchOperand_ParseFail; 1833 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 1834 if (!MCE) { 1835 TokError("immediate value expected for vector index"); 1836 return MatchOperand_ParseFail; 1837 } 1838 1839 SMLoc E = Parser.getTok().getLoc(); 1840 if (Parser.getTok().isNot(AsmToken::RBrac)) { 1841 Error(E, "']' expected"); 1842 return MatchOperand_ParseFail; 1843 } 1844 1845 Parser.Lex(); // Eat right bracket token. 1846 1847 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 1848 SIdx, E, 1849 getContext())); 1850 } 1851 #endif 1852 1853 return RegNum; 1854 } 1855 1856 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 1857 // If a recoverable error occurs, return 1. If an irrecoverable error 1858 // occurs, return -1. An irrecoverable error is one where tokens have been 1859 // consumed in the process of trying to parse the shifter (i.e., when it is 1860 // indeed a shifter operand, but malformed). 1861 int ARMAsmParser::tryParseShiftRegister( 1862 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 1863 SMLoc S = Parser.getTok().getLoc(); 1864 const AsmToken &Tok = Parser.getTok(); 1865 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 1866 1867 std::string upperCase = Tok.getString().str(); 1868 std::string lowerCase = LowercaseString(upperCase); 1869 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 1870 .Case("lsl", ARM_AM::lsl) 1871 .Case("lsr", ARM_AM::lsr) 1872 .Case("asr", ARM_AM::asr) 1873 .Case("ror", ARM_AM::ror) 1874 .Case("rrx", ARM_AM::rrx) 1875 .Default(ARM_AM::no_shift); 1876 1877 if (ShiftTy == ARM_AM::no_shift) 1878 return 1; 1879 1880 Parser.Lex(); // Eat the operator. 
1881 1882 // The source register for the shift has already been added to the 1883 // operand list, so we need to pop it off and combine it into the shifted 1884 // register operand instead. 1885 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 1886 if (!PrevOp->isReg()) 1887 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 1888 int SrcReg = PrevOp->getReg(); 1889 int64_t Imm = 0; 1890 int ShiftReg = 0; 1891 if (ShiftTy == ARM_AM::rrx) { 1892 // RRX Doesn't have an explicit shift amount. The encoder expects 1893 // the shift register to be the same as the source register. Seems odd, 1894 // but OK. 1895 ShiftReg = SrcReg; 1896 } else { 1897 // Figure out if this is shifted by a constant or a register (for non-RRX). 1898 if (Parser.getTok().is(AsmToken::Hash)) { 1899 Parser.Lex(); // Eat hash. 1900 SMLoc ImmLoc = Parser.getTok().getLoc(); 1901 const MCExpr *ShiftExpr = 0; 1902 if (getParser().ParseExpression(ShiftExpr)) { 1903 Error(ImmLoc, "invalid immediate shift value"); 1904 return -1; 1905 } 1906 // The expression must be evaluatable as an immediate. 1907 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 1908 if (!CE) { 1909 Error(ImmLoc, "invalid immediate shift value"); 1910 return -1; 1911 } 1912 // Range check the immediate. 
1913 // lsl, ror: 0 <= imm <= 31 1914 // lsr, asr: 0 <= imm <= 32 1915 Imm = CE->getValue(); 1916 if (Imm < 0 || 1917 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 1918 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 1919 Error(ImmLoc, "immediate shift value out of range"); 1920 return -1; 1921 } 1922 } else if (Parser.getTok().is(AsmToken::Identifier)) { 1923 ShiftReg = tryParseRegister(); 1924 SMLoc L = Parser.getTok().getLoc(); 1925 if (ShiftReg == -1) { 1926 Error (L, "expected immediate or register in shift operand"); 1927 return -1; 1928 } 1929 } else { 1930 Error (Parser.getTok().getLoc(), 1931 "expected immediate or register in shift operand"); 1932 return -1; 1933 } 1934 } 1935 1936 if (ShiftReg && ShiftTy != ARM_AM::rrx) 1937 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 1938 ShiftReg, Imm, 1939 S, Parser.getTok().getLoc())); 1940 else 1941 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 1942 S, Parser.getTok().getLoc())); 1943 1944 return 0; 1945 } 1946 1947 1948 /// Try to parse a register name. The token must be an Identifier when called. 1949 /// If it's a register, an AsmOperand is created. Another AsmOperand is created 1950 /// if there is a "writeback". 'true' if it's not a register. 1951 /// 1952 /// TODO this is likely to change to allow different register types and or to 1953 /// parse for a specific register type. 
1954 bool ARMAsmParser:: 1955 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 1956 SMLoc S = Parser.getTok().getLoc(); 1957 int RegNo = tryParseRegister(); 1958 if (RegNo == -1) 1959 return true; 1960 1961 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 1962 1963 const AsmToken &ExclaimTok = Parser.getTok(); 1964 if (ExclaimTok.is(AsmToken::Exclaim)) { 1965 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 1966 ExclaimTok.getLoc())); 1967 Parser.Lex(); // Eat exclaim token 1968 return false; 1969 } 1970 1971 // Also check for an index operand. This is only legal for vector registers, 1972 // but that'll get caught OK in operand matching, so we don't need to 1973 // explicitly filter everything else out here. 1974 if (Parser.getTok().is(AsmToken::LBrac)) { 1975 SMLoc SIdx = Parser.getTok().getLoc(); 1976 Parser.Lex(); // Eat left bracket token. 1977 1978 const MCExpr *ImmVal; 1979 SMLoc ExprLoc = Parser.getTok().getLoc(); 1980 if (getParser().ParseExpression(ImmVal)) 1981 return MatchOperand_ParseFail; 1982 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 1983 if (!MCE) { 1984 TokError("immediate value expected for vector index"); 1985 return MatchOperand_ParseFail; 1986 } 1987 1988 SMLoc E = Parser.getTok().getLoc(); 1989 if (Parser.getTok().isNot(AsmToken::RBrac)) { 1990 Error(E, "']' expected"); 1991 return MatchOperand_ParseFail; 1992 } 1993 1994 Parser.Lex(); // Eat right bracket token. 1995 1996 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 1997 SIdx, E, 1998 getContext())); 1999 } 2000 2001 return false; 2002 } 2003 2004 /// MatchCoprocessorOperandName - Try to parse an coprocessor related 2005 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2006 /// "c5", ... 2007 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2008 // Use the same layout as the tablegen'erated register name matcher. 
Ugly, 2009 // but efficient. 2010 switch (Name.size()) { 2011 default: break; 2012 case 2: 2013 if (Name[0] != CoprocOp) 2014 return -1; 2015 switch (Name[1]) { 2016 default: return -1; 2017 case '0': return 0; 2018 case '1': return 1; 2019 case '2': return 2; 2020 case '3': return 3; 2021 case '4': return 4; 2022 case '5': return 5; 2023 case '6': return 6; 2024 case '7': return 7; 2025 case '8': return 8; 2026 case '9': return 9; 2027 } 2028 break; 2029 case 3: 2030 if (Name[0] != CoprocOp || Name[1] != '1') 2031 return -1; 2032 switch (Name[2]) { 2033 default: return -1; 2034 case '0': return 10; 2035 case '1': return 11; 2036 case '2': return 12; 2037 case '3': return 13; 2038 case '4': return 14; 2039 case '5': return 15; 2040 } 2041 break; 2042 } 2043 2044 return -1; 2045 } 2046 2047 /// parseITCondCode - Try to parse a condition code for an IT instruction. 2048 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2049 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2050 SMLoc S = Parser.getTok().getLoc(); 2051 const AsmToken &Tok = Parser.getTok(); 2052 if (!Tok.is(AsmToken::Identifier)) 2053 return MatchOperand_NoMatch; 2054 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2055 .Case("eq", ARMCC::EQ) 2056 .Case("ne", ARMCC::NE) 2057 .Case("hs", ARMCC::HS) 2058 .Case("cs", ARMCC::HS) 2059 .Case("lo", ARMCC::LO) 2060 .Case("cc", ARMCC::LO) 2061 .Case("mi", ARMCC::MI) 2062 .Case("pl", ARMCC::PL) 2063 .Case("vs", ARMCC::VS) 2064 .Case("vc", ARMCC::VC) 2065 .Case("hi", ARMCC::HI) 2066 .Case("ls", ARMCC::LS) 2067 .Case("ge", ARMCC::GE) 2068 .Case("lt", ARMCC::LT) 2069 .Case("gt", ARMCC::GT) 2070 .Case("le", ARMCC::LE) 2071 .Case("al", ARMCC::AL) 2072 .Default(~0U); 2073 if (CC == ~0U) 2074 return MatchOperand_NoMatch; 2075 Parser.Lex(); // Eat the token. 
2076 2077 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2078 2079 return MatchOperand_Success; 2080 } 2081 2082 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The 2083 /// token must be an Identifier when called, and if it is a coprocessor 2084 /// number, the token is eaten and the operand is added to the operand list. 2085 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2086 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2087 SMLoc S = Parser.getTok().getLoc(); 2088 const AsmToken &Tok = Parser.getTok(); 2089 if (Tok.isNot(AsmToken::Identifier)) 2090 return MatchOperand_NoMatch; 2091 2092 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2093 if (Num == -1) 2094 return MatchOperand_NoMatch; 2095 2096 Parser.Lex(); // Eat identifier token. 2097 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2098 return MatchOperand_Success; 2099 } 2100 2101 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2102 /// token must be an Identifier when called, and if it is a coprocessor 2103 /// number, the token is eaten and the operand is added to the operand list. 2104 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2105 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2106 SMLoc S = Parser.getTok().getLoc(); 2107 const AsmToken &Tok = Parser.getTok(); 2108 if (Tok.isNot(AsmToken::Identifier)) 2109 return MatchOperand_NoMatch; 2110 2111 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2112 if (Reg == -1) 2113 return MatchOperand_NoMatch; 2114 2115 Parser.Lex(); // Eat identifier token. 2116 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2117 return MatchOperand_Success; 2118 } 2119 2120 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 
2121 /// coproc_option : '{' imm0_255 '}' 2122 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2123 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2124 SMLoc S = Parser.getTok().getLoc(); 2125 2126 // If this isn't a '{', this isn't a coprocessor immediate operand. 2127 if (Parser.getTok().isNot(AsmToken::LCurly)) 2128 return MatchOperand_NoMatch; 2129 Parser.Lex(); // Eat the '{' 2130 2131 const MCExpr *Expr; 2132 SMLoc Loc = Parser.getTok().getLoc(); 2133 if (getParser().ParseExpression(Expr)) { 2134 Error(Loc, "illegal expression"); 2135 return MatchOperand_ParseFail; 2136 } 2137 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2138 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2139 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2140 return MatchOperand_ParseFail; 2141 } 2142 int Val = CE->getValue(); 2143 2144 // Check for and consume the closing '}' 2145 if (Parser.getTok().isNot(AsmToken::RCurly)) 2146 return MatchOperand_ParseFail; 2147 SMLoc E = Parser.getTok().getLoc(); 2148 Parser.Lex(); // Eat the '}' 2149 2150 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2151 return MatchOperand_Success; 2152 } 2153 2154 // For register list parsing, we need to map from raw GPR register numbering 2155 // to the enumeration values. The enumeration values aren't sorted by 2156 // register number due to our using "sp", "lr" and "pc" as canonical names. 2157 static unsigned getNextRegister(unsigned Reg) { 2158 // If this is a GPR, we need to do it manually, otherwise we can rely 2159 // on the sort ordering of the enumeration since the other reg-classes 2160 // are sane. 
2161 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2162 return Reg + 1; 2163 switch(Reg) { 2164 default: assert(0 && "Invalid GPR number!"); 2165 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2166 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2167 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2168 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2169 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2170 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2171 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2172 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2173 } 2174 } 2175 2176 /// Parse a register list. 2177 bool ARMAsmParser:: 2178 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2179 assert(Parser.getTok().is(AsmToken::LCurly) && 2180 "Token is not a Left Curly Brace"); 2181 SMLoc S = Parser.getTok().getLoc(); 2182 Parser.Lex(); // Eat '{' token. 2183 SMLoc RegLoc = Parser.getTok().getLoc(); 2184 2185 // Check the first register in the list to see what register class 2186 // this is a list of. 2187 int Reg = tryParseRegister(); 2188 if (Reg == -1) 2189 return Error(RegLoc, "register expected"); 2190 2191 MCRegisterClass *RC; 2192 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2193 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2194 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2195 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2196 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2197 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2198 else 2199 return Error(RegLoc, "invalid register in register list"); 2200 2201 // The reglist instructions have at most 16 registers, so reserve 2202 // space for that many. 2203 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2204 // Store the first register. 
2205 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2206 2207 // This starts immediately after the first register token in the list, 2208 // so we can see either a comma or a minus (range separator) as a legal 2209 // next token. 2210 while (Parser.getTok().is(AsmToken::Comma) || 2211 Parser.getTok().is(AsmToken::Minus)) { 2212 if (Parser.getTok().is(AsmToken::Minus)) { 2213 Parser.Lex(); // Eat the comma. 2214 SMLoc EndLoc = Parser.getTok().getLoc(); 2215 int EndReg = tryParseRegister(); 2216 if (EndReg == -1) 2217 return Error(EndLoc, "register expected"); 2218 // If the register is the same as the start reg, there's nothing 2219 // more to do. 2220 if (Reg == EndReg) 2221 continue; 2222 // The register must be in the same register class as the first. 2223 if (!RC->contains(EndReg)) 2224 return Error(EndLoc, "invalid register in register list"); 2225 // Ranges must go from low to high. 2226 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2227 return Error(EndLoc, "bad range in register list"); 2228 2229 // Add all the registers in the range to the register list. 2230 while (Reg != EndReg) { 2231 Reg = getNextRegister(Reg); 2232 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2233 } 2234 continue; 2235 } 2236 Parser.Lex(); // Eat the comma. 2237 RegLoc = Parser.getTok().getLoc(); 2238 int OldReg = Reg; 2239 Reg = tryParseRegister(); 2240 if (Reg == -1) 2241 return Error(RegLoc, "register expected"); 2242 // The register must be in the same register class as the first. 2243 if (!RC->contains(Reg)) 2244 return Error(RegLoc, "invalid register in register list"); 2245 // List must be monotonically increasing. 2246 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2247 return Error(RegLoc, "register list not in ascending order"); 2248 // VFP register lists must also be contiguous. 
2249 // It's OK to use the enumeration values directly here rather, as the 2250 // VFP register classes have the enum sorted properly. 2251 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2252 Reg != OldReg + 1) 2253 return Error(RegLoc, "non-contiguous register range"); 2254 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2255 } 2256 2257 SMLoc E = Parser.getTok().getLoc(); 2258 if (Parser.getTok().isNot(AsmToken::RCurly)) 2259 return Error(E, "'}' expected"); 2260 Parser.Lex(); // Eat '}' token. 2261 2262 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2263 return false; 2264 } 2265 2266 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 2267 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2268 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2269 SMLoc S = Parser.getTok().getLoc(); 2270 const AsmToken &Tok = Parser.getTok(); 2271 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2272 StringRef OptStr = Tok.getString(); 2273 2274 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2275 .Case("sy", ARM_MB::SY) 2276 .Case("st", ARM_MB::ST) 2277 .Case("sh", ARM_MB::ISH) 2278 .Case("ish", ARM_MB::ISH) 2279 .Case("shst", ARM_MB::ISHST) 2280 .Case("ishst", ARM_MB::ISHST) 2281 .Case("nsh", ARM_MB::NSH) 2282 .Case("un", ARM_MB::NSH) 2283 .Case("nshst", ARM_MB::NSHST) 2284 .Case("unst", ARM_MB::NSHST) 2285 .Case("osh", ARM_MB::OSH) 2286 .Case("oshst", ARM_MB::OSHST) 2287 .Default(~0U); 2288 2289 if (Opt == ~0U) 2290 return MatchOperand_NoMatch; 2291 2292 Parser.Lex(); // Eat identifier token. 2293 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2294 return MatchOperand_Success; 2295 } 2296 2297 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 
2298 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2299 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2300 SMLoc S = Parser.getTok().getLoc(); 2301 const AsmToken &Tok = Parser.getTok(); 2302 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2303 StringRef IFlagsStr = Tok.getString(); 2304 2305 // An iflags string of "none" is interpreted to mean that none of the AIF 2306 // bits are set. Not a terribly useful instruction, but a valid encoding. 2307 unsigned IFlags = 0; 2308 if (IFlagsStr != "none") { 2309 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2310 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2311 .Case("a", ARM_PROC::A) 2312 .Case("i", ARM_PROC::I) 2313 .Case("f", ARM_PROC::F) 2314 .Default(~0U); 2315 2316 // If some specific iflag is already set, it means that some letter is 2317 // present more than once, this is not acceptable. 2318 if (Flag == ~0U || (IFlags & Flag)) 2319 return MatchOperand_NoMatch; 2320 2321 IFlags |= Flag; 2322 } 2323 } 2324 2325 Parser.Lex(); // Eat identifier token. 2326 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2327 return MatchOperand_Success; 2328 } 2329 2330 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2331 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2332 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2333 SMLoc S = Parser.getTok().getLoc(); 2334 const AsmToken &Tok = Parser.getTok(); 2335 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2336 StringRef Mask = Tok.getString(); 2337 2338 if (isMClass()) { 2339 // See ARMv6-M 10.1.1 2340 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2341 .Case("apsr", 0) 2342 .Case("iapsr", 1) 2343 .Case("eapsr", 2) 2344 .Case("xpsr", 3) 2345 .Case("ipsr", 5) 2346 .Case("epsr", 6) 2347 .Case("iepsr", 7) 2348 .Case("msp", 8) 2349 .Case("psp", 9) 2350 .Case("primask", 16) 2351 .Case("basepri", 17) 2352 .Case("basepri_max", 18) 2353 .Case("faultmask", 19) 2354 .Case("control", 20) 2355 .Default(~0U); 2356 2357 if (FlagsVal == ~0U) 2358 return MatchOperand_NoMatch; 2359 2360 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2361 // basepri, basepri_max and faultmask only valid for V7m. 2362 return MatchOperand_NoMatch; 2363 2364 Parser.Lex(); // Eat identifier token. 
2365 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2366 return MatchOperand_Success; 2367 } 2368 2369 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2370 size_t Start = 0, Next = Mask.find('_'); 2371 StringRef Flags = ""; 2372 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2373 if (Next != StringRef::npos) 2374 Flags = Mask.slice(Next+1, Mask.size()); 2375 2376 // FlagsVal contains the complete mask: 2377 // 3-0: Mask 2378 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2379 unsigned FlagsVal = 0; 2380 2381 if (SpecReg == "apsr") { 2382 FlagsVal = StringSwitch<unsigned>(Flags) 2383 .Case("nzcvq", 0x8) // same as CPSR_f 2384 .Case("g", 0x4) // same as CPSR_s 2385 .Case("nzcvqg", 0xc) // same as CPSR_fs 2386 .Default(~0U); 2387 2388 if (FlagsVal == ~0U) { 2389 if (!Flags.empty()) 2390 return MatchOperand_NoMatch; 2391 else 2392 FlagsVal = 8; // No flag 2393 } 2394 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2395 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2396 Flags = "fc"; 2397 for (int i = 0, e = Flags.size(); i != e; ++i) { 2398 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2399 .Case("c", 1) 2400 .Case("x", 2) 2401 .Case("s", 4) 2402 .Case("f", 8) 2403 .Default(~0U); 2404 2405 // If some specific flag is already set, it means that some letter is 2406 // present more than once, this is not acceptable. 2407 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2408 return MatchOperand_NoMatch; 2409 FlagsVal |= Flag; 2410 } 2411 } else // No match for special register. 2412 return MatchOperand_NoMatch; 2413 2414 // Special register without flags are equivalent to "fc" flags. 2415 if (!FlagsVal) 2416 FlagsVal = 0x9; 2417 2418 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2419 if (SpecReg == "spsr") 2420 FlagsVal |= 16; 2421 2422 Parser.Lex(); // Eat identifier token. 
2423 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2424 return MatchOperand_Success; 2425 } 2426 2427 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2428 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2429 int Low, int High) { 2430 const AsmToken &Tok = Parser.getTok(); 2431 if (Tok.isNot(AsmToken::Identifier)) { 2432 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2433 return MatchOperand_ParseFail; 2434 } 2435 StringRef ShiftName = Tok.getString(); 2436 std::string LowerOp = LowercaseString(Op); 2437 std::string UpperOp = UppercaseString(Op); 2438 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2439 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2440 return MatchOperand_ParseFail; 2441 } 2442 Parser.Lex(); // Eat shift type token. 2443 2444 // There must be a '#' and a shift amount. 2445 if (Parser.getTok().isNot(AsmToken::Hash)) { 2446 Error(Parser.getTok().getLoc(), "'#' expected"); 2447 return MatchOperand_ParseFail; 2448 } 2449 Parser.Lex(); // Eat hash token. 
2450 2451 const MCExpr *ShiftAmount; 2452 SMLoc Loc = Parser.getTok().getLoc(); 2453 if (getParser().ParseExpression(ShiftAmount)) { 2454 Error(Loc, "illegal expression"); 2455 return MatchOperand_ParseFail; 2456 } 2457 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2458 if (!CE) { 2459 Error(Loc, "constant expression expected"); 2460 return MatchOperand_ParseFail; 2461 } 2462 int Val = CE->getValue(); 2463 if (Val < Low || Val > High) { 2464 Error(Loc, "immediate value out of range"); 2465 return MatchOperand_ParseFail; 2466 } 2467 2468 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2469 2470 return MatchOperand_Success; 2471 } 2472 2473 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2474 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2475 const AsmToken &Tok = Parser.getTok(); 2476 SMLoc S = Tok.getLoc(); 2477 if (Tok.isNot(AsmToken::Identifier)) { 2478 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2479 return MatchOperand_ParseFail; 2480 } 2481 int Val = StringSwitch<int>(Tok.getString()) 2482 .Case("be", 1) 2483 .Case("le", 0) 2484 .Default(-1); 2485 Parser.Lex(); // Eat the token. 2486 2487 if (Val == -1) { 2488 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2489 return MatchOperand_ParseFail; 2490 } 2491 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2492 getContext()), 2493 S, Parser.getTok().getLoc())); 2494 return MatchOperand_Success; 2495 } 2496 2497 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2498 /// instructions. Legal values are: 2499 /// lsl #n 'n' in [0,31] 2500 /// asr #n 'n' in [1,32] 2501 /// n == 32 encoded as n == 0. 
2502 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2503 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2504 const AsmToken &Tok = Parser.getTok(); 2505 SMLoc S = Tok.getLoc(); 2506 if (Tok.isNot(AsmToken::Identifier)) { 2507 Error(S, "shift operator 'asr' or 'lsl' expected"); 2508 return MatchOperand_ParseFail; 2509 } 2510 StringRef ShiftName = Tok.getString(); 2511 bool isASR; 2512 if (ShiftName == "lsl" || ShiftName == "LSL") 2513 isASR = false; 2514 else if (ShiftName == "asr" || ShiftName == "ASR") 2515 isASR = true; 2516 else { 2517 Error(S, "shift operator 'asr' or 'lsl' expected"); 2518 return MatchOperand_ParseFail; 2519 } 2520 Parser.Lex(); // Eat the operator. 2521 2522 // A '#' and a shift amount. 2523 if (Parser.getTok().isNot(AsmToken::Hash)) { 2524 Error(Parser.getTok().getLoc(), "'#' expected"); 2525 return MatchOperand_ParseFail; 2526 } 2527 Parser.Lex(); // Eat hash token. 2528 2529 const MCExpr *ShiftAmount; 2530 SMLoc E = Parser.getTok().getLoc(); 2531 if (getParser().ParseExpression(ShiftAmount)) { 2532 Error(E, "malformed shift expression"); 2533 return MatchOperand_ParseFail; 2534 } 2535 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2536 if (!CE) { 2537 Error(E, "shift amount must be an immediate"); 2538 return MatchOperand_ParseFail; 2539 } 2540 2541 int64_t Val = CE->getValue(); 2542 if (isASR) { 2543 // Shift amount must be in [1,32] 2544 if (Val < 1 || Val > 32) { 2545 Error(E, "'asr' shift amount must be in range [1,32]"); 2546 return MatchOperand_ParseFail; 2547 } 2548 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2549 if (isThumb() && Val == 32) { 2550 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2551 return MatchOperand_ParseFail; 2552 } 2553 if (Val == 32) Val = 0; 2554 } else { 2555 // Shift amount must be in [1,32] 2556 if (Val < 0 || Val > 31) { 2557 Error(E, "'lsr' shift amount must be in range [0,31]"); 2558 return MatchOperand_ParseFail; 2559 } 2560 } 2561 2562 E = Parser.getTok().getLoc(); 2563 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2564 2565 return MatchOperand_Success; 2566 } 2567 2568 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2569 /// of instructions. Legal values are: 2570 /// ror #n 'n' in {0, 8, 16, 24} 2571 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2572 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2573 const AsmToken &Tok = Parser.getTok(); 2574 SMLoc S = Tok.getLoc(); 2575 if (Tok.isNot(AsmToken::Identifier)) 2576 return MatchOperand_NoMatch; 2577 StringRef ShiftName = Tok.getString(); 2578 if (ShiftName != "ror" && ShiftName != "ROR") 2579 return MatchOperand_NoMatch; 2580 Parser.Lex(); // Eat the operator. 2581 2582 // A '#' and a rotate amount. 2583 if (Parser.getTok().isNot(AsmToken::Hash)) { 2584 Error(Parser.getTok().getLoc(), "'#' expected"); 2585 return MatchOperand_ParseFail; 2586 } 2587 Parser.Lex(); // Eat hash token. 2588 2589 const MCExpr *ShiftAmount; 2590 SMLoc E = Parser.getTok().getLoc(); 2591 if (getParser().ParseExpression(ShiftAmount)) { 2592 Error(E, "malformed rotate expression"); 2593 return MatchOperand_ParseFail; 2594 } 2595 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2596 if (!CE) { 2597 Error(E, "rotate amount must be an immediate"); 2598 return MatchOperand_ParseFail; 2599 } 2600 2601 int64_t Val = CE->getValue(); 2602 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2603 // normally, zero is represented in asm by omitting the rotate operand 2604 // entirely. 
2605 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2606 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2607 return MatchOperand_ParseFail; 2608 } 2609 2610 E = Parser.getTok().getLoc(); 2611 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2612 2613 return MatchOperand_Success; 2614 } 2615 2616 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2617 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2618 SMLoc S = Parser.getTok().getLoc(); 2619 // The bitfield descriptor is really two operands, the LSB and the width. 2620 if (Parser.getTok().isNot(AsmToken::Hash)) { 2621 Error(Parser.getTok().getLoc(), "'#' expected"); 2622 return MatchOperand_ParseFail; 2623 } 2624 Parser.Lex(); // Eat hash token. 2625 2626 const MCExpr *LSBExpr; 2627 SMLoc E = Parser.getTok().getLoc(); 2628 if (getParser().ParseExpression(LSBExpr)) { 2629 Error(E, "malformed immediate expression"); 2630 return MatchOperand_ParseFail; 2631 } 2632 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2633 if (!CE) { 2634 Error(E, "'lsb' operand must be an immediate"); 2635 return MatchOperand_ParseFail; 2636 } 2637 2638 int64_t LSB = CE->getValue(); 2639 // The LSB must be in the range [0,31] 2640 if (LSB < 0 || LSB > 31) { 2641 Error(E, "'lsb' operand must be in the range [0,31]"); 2642 return MatchOperand_ParseFail; 2643 } 2644 E = Parser.getTok().getLoc(); 2645 2646 // Expect another immediate operand. 2647 if (Parser.getTok().isNot(AsmToken::Comma)) { 2648 Error(Parser.getTok().getLoc(), "too few operands"); 2649 return MatchOperand_ParseFail; 2650 } 2651 Parser.Lex(); // Eat hash token. 2652 if (Parser.getTok().isNot(AsmToken::Hash)) { 2653 Error(Parser.getTok().getLoc(), "'#' expected"); 2654 return MatchOperand_ParseFail; 2655 } 2656 Parser.Lex(); // Eat hash token. 
2657 2658 const MCExpr *WidthExpr; 2659 if (getParser().ParseExpression(WidthExpr)) { 2660 Error(E, "malformed immediate expression"); 2661 return MatchOperand_ParseFail; 2662 } 2663 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2664 if (!CE) { 2665 Error(E, "'width' operand must be an immediate"); 2666 return MatchOperand_ParseFail; 2667 } 2668 2669 int64_t Width = CE->getValue(); 2670 // The LSB must be in the range [1,32-lsb] 2671 if (Width < 1 || Width > 32 - LSB) { 2672 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2673 return MatchOperand_ParseFail; 2674 } 2675 E = Parser.getTok().getLoc(); 2676 2677 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2678 2679 return MatchOperand_Success; 2680 } 2681 2682 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2683 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2684 // Check for a post-index addressing register operand. Specifically: 2685 // postidx_reg := '+' register {, shift} 2686 // | '-' register {, shift} 2687 // | register {, shift} 2688 2689 // This method must return MatchOperand_NoMatch without consuming any tokens 2690 // in the case where there is no match, as other alternatives take other 2691 // parse methods. 2692 AsmToken Tok = Parser.getTok(); 2693 SMLoc S = Tok.getLoc(); 2694 bool haveEaten = false; 2695 bool isAdd = true; 2696 int Reg = -1; 2697 if (Tok.is(AsmToken::Plus)) { 2698 Parser.Lex(); // Eat the '+' token. 2699 haveEaten = true; 2700 } else if (Tok.is(AsmToken::Minus)) { 2701 Parser.Lex(); // Eat the '-' token. 
    isAdd = false;
    haveEaten = true;
  }
  // An identifier at this point must be a register name; anything else is
  // only an error if a '+'/'-' sign was already consumed (otherwise this is
  // simply not a match for this operand form).
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  // The register may be followed by an optional ", <shift>" clause.
  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
      return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
                                                  ShiftImm, S, E));

  return MatchOperand_Success;
}

/// parseAM3Offset - Parse the offset portion of an ARM addressing-mode-3
/// (post-index) operand: an optionally signed register or an optionally
/// signed immediate.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //  am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
      return MatchOperand_ParseFail;
    // Only constant offsets are accepted here; relocated immediates use the
    // <label> instruction forms instead.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    SMLoc E = Tok.getLoc();
    // Negative zero is encoded as the flag value INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }


  // Not an immediate; look for an (optionally signed) register.
  bool haveEaten = false;
  bool isAdd = true;
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // A consumed sign with no register following is a hard parse error; with
    // no sign consumed, some other operand parser may still match.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, E));

  return MatchOperand_Success;
}

/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  // NOTE(review): this pair uses CreateReg(0) while the other cvt* helpers
  // below use CreateImm(0) for the same placeholder — presumably both are
  // overwritten when the tied operand is resolved; confirm intentional.
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
2854 bool ARMAsmParser:: 2855 cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 2856 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2857 // Create a writeback register dummy placeholder. 2858 Inst.addOperand(MCOperand::CreateImm(0)); 2859 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2860 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 2861 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2862 return true; 2863 } 2864 2865 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 2866 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2867 /// when they refer multiple MIOperands inside a single one. 2868 bool ARMAsmParser:: 2869 cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 2870 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2871 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2872 2873 // Create a writeback register dummy placeholder. 2874 Inst.addOperand(MCOperand::CreateImm(0)); 2875 2876 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 2877 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2878 return true; 2879 } 2880 2881 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 2882 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2883 /// when they refer multiple MIOperands inside a single one. 2884 bool ARMAsmParser:: 2885 cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 2886 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2887 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2888 2889 // Create a writeback register dummy placeholder. 2890 Inst.addOperand(MCOperand::CreateImm(0)); 2891 2892 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 2893 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2894 return true; 2895 } 2896 2897 2898 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
2899 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2900 /// when they refer multiple MIOperands inside a single one. 2901 bool ARMAsmParser:: 2902 cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 2903 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2904 // Create a writeback register dummy placeholder. 2905 Inst.addOperand(MCOperand::CreateImm(0)); 2906 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2907 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 2908 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2909 return true; 2910 } 2911 2912 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 2913 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2914 /// when they refer multiple MIOperands inside a single one. 2915 bool ARMAsmParser:: 2916 cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 2917 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2918 // Create a writeback register dummy placeholder. 2919 Inst.addOperand(MCOperand::CreateImm(0)); 2920 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2921 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 2922 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2923 return true; 2924 } 2925 2926 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 2927 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2928 /// when they refer multiple MIOperands inside a single one. 2929 bool ARMAsmParser:: 2930 cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 2931 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2932 // Create a writeback register dummy placeholder. 
2933 Inst.addOperand(MCOperand::CreateImm(0)); 2934 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2935 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 2936 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2937 return true; 2938 } 2939 2940 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 2941 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2942 /// when they refer multiple MIOperands inside a single one. 2943 bool ARMAsmParser:: 2944 cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 2945 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2946 // Rt 2947 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2948 // Create a writeback register dummy placeholder. 2949 Inst.addOperand(MCOperand::CreateImm(0)); 2950 // addr 2951 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2952 // offset 2953 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 2954 // pred 2955 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2956 return true; 2957 } 2958 2959 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 2960 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2961 /// when they refer multiple MIOperands inside a single one. 2962 bool ARMAsmParser:: 2963 cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 2964 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2965 // Rt 2966 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2967 // Create a writeback register dummy placeholder. 2968 Inst.addOperand(MCOperand::CreateImm(0)); 2969 // addr 2970 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2971 // offset 2972 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 2973 // pred 2974 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2975 return true; 2976 } 2977 2978 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
2979 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2980 /// when they refer multiple MIOperands inside a single one. 2981 bool ARMAsmParser:: 2982 cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 2983 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2984 // Create a writeback register dummy placeholder. 2985 Inst.addOperand(MCOperand::CreateImm(0)); 2986 // Rt 2987 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 2988 // addr 2989 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 2990 // offset 2991 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 2992 // pred 2993 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 2994 return true; 2995 } 2996 2997 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 2998 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 2999 /// when they refer multiple MIOperands inside a single one. 3000 bool ARMAsmParser:: 3001 cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3002 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3003 // Create a writeback register dummy placeholder. 3004 Inst.addOperand(MCOperand::CreateImm(0)); 3005 // Rt 3006 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3007 // addr 3008 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3009 // offset 3010 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3011 // pred 3012 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3013 return true; 3014 } 3015 3016 /// cvtLdrdPre - Convert parsed operands to MCInst. 3017 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3018 /// when they refer multiple MIOperands inside a single one. 
3019 bool ARMAsmParser:: 3020 cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3021 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3022 // Rt, Rt2 3023 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3024 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3025 // Create a writeback register dummy placeholder. 3026 Inst.addOperand(MCOperand::CreateImm(0)); 3027 // addr 3028 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3029 // pred 3030 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3031 return true; 3032 } 3033 3034 /// cvtStrdPre - Convert parsed operands to MCInst. 3035 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3036 /// when they refer multiple MIOperands inside a single one. 3037 bool ARMAsmParser:: 3038 cvtStrdPre(MCInst &Inst, unsigned Opcode, 3039 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3040 // Create a writeback register dummy placeholder. 3041 Inst.addOperand(MCOperand::CreateImm(0)); 3042 // Rt, Rt2 3043 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3044 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3045 // addr 3046 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3047 // pred 3048 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3049 return true; 3050 } 3051 3052 /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3053 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3054 /// when they refer multiple MIOperands inside a single one. 3055 bool ARMAsmParser:: 3056 cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3057 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3058 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3059 // Create a writeback register dummy placeholder. 
3060 Inst.addOperand(MCOperand::CreateImm(0)); 3061 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3062 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3063 return true; 3064 } 3065 3066 /// cvtThumbMultiple- Convert parsed operands to MCInst. 3067 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3068 /// when they refer multiple MIOperands inside a single one. 3069 bool ARMAsmParser:: 3070 cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3071 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3072 // The second source operand must be the same register as the destination 3073 // operand. 3074 if (Operands.size() == 6 && 3075 (((ARMOperand*)Operands[3])->getReg() != 3076 ((ARMOperand*)Operands[5])->getReg()) && 3077 (((ARMOperand*)Operands[3])->getReg() != 3078 ((ARMOperand*)Operands[4])->getReg())) { 3079 Error(Operands[3]->getStartLoc(), 3080 "destination register must match source register"); 3081 return false; 3082 } 3083 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3084 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3085 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3086 // If we have a three-operand form, use that, else the second source operand 3087 // is just the destination operand again. 3088 if (Operands.size() == 6) 3089 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3090 else 3091 Inst.addOperand(Inst.getOperand(0)); 3092 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3093 3094 return true; 3095 } 3096 3097 /// Parse an ARM memory expression, return false if successful else return true 3098 /// or an error. The first token must be a '[' when called. 3099 bool ARMAsmParser:: 3100 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3101 SMLoc S, E; 3102 assert(Parser.getTok().is(AsmToken::LBrac) && 3103 "Token is not a Left Bracket"); 3104 S = Parser.getTok().getLoc(); 3105 Parser.Lex(); // Eat left bracket token. 
3106 3107 const AsmToken &BaseRegTok = Parser.getTok(); 3108 int BaseRegNum = tryParseRegister(); 3109 if (BaseRegNum == -1) 3110 return Error(BaseRegTok.getLoc(), "register expected"); 3111 3112 // The next token must either be a comma or a closing bracket. 3113 const AsmToken &Tok = Parser.getTok(); 3114 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3115 return Error(Tok.getLoc(), "malformed memory operand"); 3116 3117 if (Tok.is(AsmToken::RBrac)) { 3118 E = Tok.getLoc(); 3119 Parser.Lex(); // Eat right bracket token. 3120 3121 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3122 0, 0, false, S, E)); 3123 3124 // If there's a pre-indexing writeback marker, '!', just add it as a token 3125 // operand. It's rather odd, but syntactically valid. 3126 if (Parser.getTok().is(AsmToken::Exclaim)) { 3127 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3128 Parser.Lex(); // Eat the '!'. 3129 } 3130 3131 return false; 3132 } 3133 3134 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3135 Parser.Lex(); // Eat the comma. 3136 3137 // If we have a ':', it's an alignment specifier. 3138 if (Parser.getTok().is(AsmToken::Colon)) { 3139 Parser.Lex(); // Eat the ':'. 3140 E = Parser.getTok().getLoc(); 3141 3142 const MCExpr *Expr; 3143 if (getParser().ParseExpression(Expr)) 3144 return true; 3145 3146 // The expression has to be a constant. Memory references with relocations 3147 // don't come through here, as they use the <label> forms of the relevant 3148 // instructions. 
3149 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3150 if (!CE) 3151 return Error (E, "constant expression expected"); 3152 3153 unsigned Align = 0; 3154 switch (CE->getValue()) { 3155 default: 3156 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3157 case 64: Align = 8; break; 3158 case 128: Align = 16; break; 3159 case 256: Align = 32; break; 3160 } 3161 3162 // Now we should have the closing ']' 3163 E = Parser.getTok().getLoc(); 3164 if (Parser.getTok().isNot(AsmToken::RBrac)) 3165 return Error(E, "']' expected"); 3166 Parser.Lex(); // Eat right bracket token. 3167 3168 // Don't worry about range checking the value here. That's handled by 3169 // the is*() predicates. 3170 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3171 ARM_AM::no_shift, 0, Align, 3172 false, S, E)); 3173 3174 // If there's a pre-indexing writeback marker, '!', just add it as a token 3175 // operand. 3176 if (Parser.getTok().is(AsmToken::Exclaim)) { 3177 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3178 Parser.Lex(); // Eat the '!'. 3179 } 3180 3181 return false; 3182 } 3183 3184 // If we have a '#', it's an immediate offset, else assume it's a register 3185 // offset. 3186 if (Parser.getTok().is(AsmToken::Hash)) { 3187 Parser.Lex(); // Eat the '#'. 3188 E = Parser.getTok().getLoc(); 3189 3190 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3191 const MCExpr *Offset; 3192 if (getParser().ParseExpression(Offset)) 3193 return true; 3194 3195 // The expression has to be a constant. Memory references with relocations 3196 // don't come through here, as they use the <label> forms of the relevant 3197 // instructions. 3198 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3199 if (!CE) 3200 return Error (E, "constant expression expected"); 3201 3202 // If the constant was #-0, represent it as INT32_MIN. 
3203 int32_t Val = CE->getValue(); 3204 if (isNegative && Val == 0) 3205 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3206 3207 // Now we should have the closing ']' 3208 E = Parser.getTok().getLoc(); 3209 if (Parser.getTok().isNot(AsmToken::RBrac)) 3210 return Error(E, "']' expected"); 3211 Parser.Lex(); // Eat right bracket token. 3212 3213 // Don't worry about range checking the value here. That's handled by 3214 // the is*() predicates. 3215 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3216 ARM_AM::no_shift, 0, 0, 3217 false, S, E)); 3218 3219 // If there's a pre-indexing writeback marker, '!', just add it as a token 3220 // operand. 3221 if (Parser.getTok().is(AsmToken::Exclaim)) { 3222 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3223 Parser.Lex(); // Eat the '!'. 3224 } 3225 3226 return false; 3227 } 3228 3229 // The register offset is optionally preceded by a '+' or '-' 3230 bool isNegative = false; 3231 if (Parser.getTok().is(AsmToken::Minus)) { 3232 isNegative = true; 3233 Parser.Lex(); // Eat the '-'. 3234 } else if (Parser.getTok().is(AsmToken::Plus)) { 3235 // Nothing to do. 3236 Parser.Lex(); // Eat the '+'. 3237 } 3238 3239 E = Parser.getTok().getLoc(); 3240 int OffsetRegNum = tryParseRegister(); 3241 if (OffsetRegNum == -1) 3242 return Error(E, "register expected"); 3243 3244 // If there's a shift operator, handle it. 3245 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3246 unsigned ShiftImm = 0; 3247 if (Parser.getTok().is(AsmToken::Comma)) { 3248 Parser.Lex(); // Eat the ','. 3249 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3250 return true; 3251 } 3252 3253 // Now we should have the closing ']' 3254 E = Parser.getTok().getLoc(); 3255 if (Parser.getTok().isNot(AsmToken::RBrac)) 3256 return Error(E, "']' expected"); 3257 Parser.Lex(); // Eat right bracket token. 
3258 3259 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3260 ShiftType, ShiftImm, 0, isNegative, 3261 S, E)); 3262 3263 // If there's a pre-indexing writeback marker, '!', just add it as a token 3264 // operand. 3265 if (Parser.getTok().is(AsmToken::Exclaim)) { 3266 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3267 Parser.Lex(); // Eat the '!'. 3268 } 3269 3270 return false; 3271 } 3272 3273 /// parseMemRegOffsetShift - one of these two: 3274 /// ( lsl | lsr | asr | ror ) , # shift_amount 3275 /// rrx 3276 /// return true if it parses a shift otherwise it returns false. 3277 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3278 unsigned &Amount) { 3279 SMLoc Loc = Parser.getTok().getLoc(); 3280 const AsmToken &Tok = Parser.getTok(); 3281 if (Tok.isNot(AsmToken::Identifier)) 3282 return true; 3283 StringRef ShiftName = Tok.getString(); 3284 if (ShiftName == "lsl" || ShiftName == "LSL") 3285 St = ARM_AM::lsl; 3286 else if (ShiftName == "lsr" || ShiftName == "LSR") 3287 St = ARM_AM::lsr; 3288 else if (ShiftName == "asr" || ShiftName == "ASR") 3289 St = ARM_AM::asr; 3290 else if (ShiftName == "ror" || ShiftName == "ROR") 3291 St = ARM_AM::ror; 3292 else if (ShiftName == "rrx" || ShiftName == "RRX") 3293 St = ARM_AM::rrx; 3294 else 3295 return Error(Loc, "illegal shift operator"); 3296 Parser.Lex(); // Eat shift type token. 3297 3298 // rrx stands alone. 3299 Amount = 0; 3300 if (St != ARM_AM::rrx) { 3301 Loc = Parser.getTok().getLoc(); 3302 // A '#' and a shift amount. 3303 const AsmToken &HashTok = Parser.getTok(); 3304 if (HashTok.isNot(AsmToken::Hash)) 3305 return Error(HashTok.getLoc(), "'#' expected"); 3306 Parser.Lex(); // Eat hash token. 3307 3308 const MCExpr *Expr; 3309 if (getParser().ParseExpression(Expr)) 3310 return true; 3311 // Range check the immediate. 
3312 // lsl, ror: 0 <= imm <= 31 3313 // lsr, asr: 0 <= imm <= 32 3314 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3315 if (!CE) 3316 return Error(Loc, "shift amount must be an immediate"); 3317 int64_t Imm = CE->getValue(); 3318 if (Imm < 0 || 3319 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3320 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3321 return Error(Loc, "immediate shift value out of range"); 3322 Amount = Imm; 3323 } 3324 3325 return false; 3326 } 3327 3328 /// parseFPImm - A floating point immediate expression operand. 3329 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3330 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3331 SMLoc S = Parser.getTok().getLoc(); 3332 3333 if (Parser.getTok().isNot(AsmToken::Hash)) 3334 return MatchOperand_NoMatch; 3335 Parser.Lex(); // Eat the '#'. 3336 3337 // Handle negation, as that still comes through as a separate token. 3338 bool isNegative = false; 3339 if (Parser.getTok().is(AsmToken::Minus)) { 3340 isNegative = true; 3341 Parser.Lex(); 3342 } 3343 const AsmToken &Tok = Parser.getTok(); 3344 if (Tok.is(AsmToken::Real)) { 3345 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3346 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3347 // If we had a '-' in front, toggle the sign bit. 3348 IntVal ^= (uint64_t)isNegative << 63; 3349 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3350 Parser.Lex(); // Eat the token. 3351 if (Val == -1) { 3352 TokError("floating point value out of range"); 3353 return MatchOperand_ParseFail; 3354 } 3355 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3356 return MatchOperand_Success; 3357 } 3358 if (Tok.is(AsmToken::Integer)) { 3359 int64_t Val = Tok.getIntVal(); 3360 Parser.Lex(); // Eat the token. 
3361 if (Val > 255 || Val < 0) { 3362 TokError("encoded floating point value out of range"); 3363 return MatchOperand_ParseFail; 3364 } 3365 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3366 return MatchOperand_Success; 3367 } 3368 3369 TokError("invalid floating point immediate"); 3370 return MatchOperand_ParseFail; 3371 } 3372 /// Parse a arm instruction operand. For now this parses the operand regardless 3373 /// of the mnemonic. 3374 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3375 StringRef Mnemonic) { 3376 SMLoc S, E; 3377 3378 // Check if the current operand has a custom associated parser, if so, try to 3379 // custom parse the operand, or fallback to the general approach. 3380 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3381 if (ResTy == MatchOperand_Success) 3382 return false; 3383 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3384 // there was a match, but an error occurred, in which case, just return that 3385 // the operand parsing failed. 3386 if (ResTy == MatchOperand_ParseFail) 3387 return true; 3388 3389 switch (getLexer().getKind()) { 3390 default: 3391 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3392 return true; 3393 case AsmToken::Identifier: { 3394 // If this is VMRS, check for the apsr_nzcv operand. 3395 if (!tryParseRegisterWithWriteBack(Operands)) 3396 return false; 3397 int Res = tryParseShiftRegister(Operands); 3398 if (Res == 0) // success 3399 return false; 3400 else if (Res == -1) // irrecoverable error 3401 return true; 3402 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3403 S = Parser.getTok().getLoc(); 3404 Parser.Lex(); 3405 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3406 return false; 3407 } 3408 3409 // Fall though for the Identifier case that is not a register or a 3410 // special name. 
3411 } 3412 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3413 case AsmToken::Dot: { // . as a branch target 3414 // This was not a register so parse other operands that start with an 3415 // identifier (like labels) as expressions and create them as immediates. 3416 const MCExpr *IdVal; 3417 S = Parser.getTok().getLoc(); 3418 if (getParser().ParseExpression(IdVal)) 3419 return true; 3420 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3421 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3422 return false; 3423 } 3424 case AsmToken::LBrac: 3425 return parseMemory(Operands); 3426 case AsmToken::LCurly: 3427 return parseRegisterList(Operands); 3428 case AsmToken::Hash: { 3429 // #42 -> immediate. 3430 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3431 S = Parser.getTok().getLoc(); 3432 Parser.Lex(); 3433 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3434 const MCExpr *ImmVal; 3435 if (getParser().ParseExpression(ImmVal)) 3436 return true; 3437 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3438 if (!CE) { 3439 Error(S, "constant expression expected"); 3440 return MatchOperand_ParseFail; 3441 } 3442 int32_t Val = CE->getValue(); 3443 if (isNegative && Val == 0) 3444 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3445 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3446 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3447 return false; 3448 } 3449 case AsmToken::Colon: { 3450 // ":lower16:" and ":upper16:" expression prefixes 3451 // FIXME: Check it's an expression prefix, 3452 // e.g. (FOO - :lower16:BAR) isn't legal. 
3453 ARMMCExpr::VariantKind RefKind; 3454 if (parsePrefix(RefKind)) 3455 return true; 3456 3457 const MCExpr *SubExprVal; 3458 if (getParser().ParseExpression(SubExprVal)) 3459 return true; 3460 3461 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3462 getContext()); 3463 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3464 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3465 return false; 3466 } 3467 } 3468 } 3469 3470 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3471 // :lower16: and :upper16:. 3472 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 3473 RefKind = ARMMCExpr::VK_ARM_None; 3474 3475 // :lower16: and :upper16: modifiers 3476 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 3477 Parser.Lex(); // Eat ':' 3478 3479 if (getLexer().isNot(AsmToken::Identifier)) { 3480 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 3481 return true; 3482 } 3483 3484 StringRef IDVal = Parser.getTok().getIdentifier(); 3485 if (IDVal == "lower16") { 3486 RefKind = ARMMCExpr::VK_ARM_LO16; 3487 } else if (IDVal == "upper16") { 3488 RefKind = ARMMCExpr::VK_ARM_HI16; 3489 } else { 3490 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 3491 return true; 3492 } 3493 Parser.Lex(); 3494 3495 if (getLexer().isNot(AsmToken::Colon)) { 3496 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 3497 return true; 3498 } 3499 Parser.Lex(); // Eat the last ':' 3500 return false; 3501 } 3502 3503 /// \brief Given a mnemonic, split out possible predication code and carry 3504 /// setting letters to form a canonical mnemonic and flags. 3505 // 3506 // FIXME: Would be nice to autogen this. 3507 // FIXME: This is a bit of a maze of special cases. 
3508 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, 3509 unsigned &PredicationCode, 3510 bool &CarrySetting, 3511 unsigned &ProcessorIMod, 3512 StringRef &ITMask) { 3513 PredicationCode = ARMCC::AL; 3514 CarrySetting = false; 3515 ProcessorIMod = 0; 3516 3517 // Ignore some mnemonics we know aren't predicated forms. 3518 // 3519 // FIXME: Would be nice to autogen this. 3520 if ((Mnemonic == "movs" && isThumb()) || 3521 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 3522 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 3523 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 3524 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 3525 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 3526 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 3527 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal") 3528 return Mnemonic; 3529 3530 // First, split out any predication code. Ignore mnemonics we know aren't 3531 // predicated but do have a carry-set and so weren't caught above. 
3532 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 3533 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 3534 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 3535 Mnemonic != "sbcs" && Mnemonic != "rscs") { 3536 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 3537 .Case("eq", ARMCC::EQ) 3538 .Case("ne", ARMCC::NE) 3539 .Case("hs", ARMCC::HS) 3540 .Case("cs", ARMCC::HS) 3541 .Case("lo", ARMCC::LO) 3542 .Case("cc", ARMCC::LO) 3543 .Case("mi", ARMCC::MI) 3544 .Case("pl", ARMCC::PL) 3545 .Case("vs", ARMCC::VS) 3546 .Case("vc", ARMCC::VC) 3547 .Case("hi", ARMCC::HI) 3548 .Case("ls", ARMCC::LS) 3549 .Case("ge", ARMCC::GE) 3550 .Case("lt", ARMCC::LT) 3551 .Case("gt", ARMCC::GT) 3552 .Case("le", ARMCC::LE) 3553 .Case("al", ARMCC::AL) 3554 .Default(~0U); 3555 if (CC != ~0U) { 3556 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 3557 PredicationCode = CC; 3558 } 3559 } 3560 3561 // Next, determine if we have a carry setting bit. We explicitly ignore all 3562 // the instructions we know end in 's'. 3563 if (Mnemonic.endswith("s") && 3564 !(Mnemonic == "cps" || Mnemonic == "mls" || 3565 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 3566 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 3567 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 3568 Mnemonic == "vrsqrts" || Mnemonic == "srs" || 3569 (Mnemonic == "movs" && isThumb()))) { 3570 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 3571 CarrySetting = true; 3572 } 3573 3574 // The "cps" instruction can have a interrupt mode operand which is glued into 3575 // the mnemonic. Check if this is the case, split it and parse the imod op 3576 if (Mnemonic.startswith("cps")) { 3577 // Split out any imod code. 
3578 unsigned IMod = 3579 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 3580 .Case("ie", ARM_PROC::IE) 3581 .Case("id", ARM_PROC::ID) 3582 .Default(~0U); 3583 if (IMod != ~0U) { 3584 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 3585 ProcessorIMod = IMod; 3586 } 3587 } 3588 3589 // The "it" instruction has the condition mask on the end of the mnemonic. 3590 if (Mnemonic.startswith("it")) { 3591 ITMask = Mnemonic.slice(2, Mnemonic.size()); 3592 Mnemonic = Mnemonic.slice(0, 2); 3593 } 3594 3595 return Mnemonic; 3596 } 3597 3598 /// \brief Given a canonical mnemonic, determine if the instruction ever allows 3599 /// inclusion of carry set or predication code operands. 3600 // 3601 // FIXME: It would be nice to autogen this. 3602 void ARMAsmParser:: 3603 getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3604 bool &CanAcceptPredicationCode) { 3605 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3606 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3607 Mnemonic == "add" || Mnemonic == "adc" || 3608 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3609 Mnemonic == "orr" || Mnemonic == "mvn" || 3610 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3611 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3612 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3613 Mnemonic == "mla" || Mnemonic == "smlal" || 3614 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3615 CanAcceptCarrySet = true; 3616 } else 3617 CanAcceptCarrySet = false; 3618 3619 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3620 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3621 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3622 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3623 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3624 (Mnemonic == "clrex" && !isThumb()) || 3625 (Mnemonic == "nop" && 
isThumbOne()) || 3626 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 3627 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 3628 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 3629 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3630 !isThumb()) || 3631 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3632 CanAcceptPredicationCode = false; 3633 } else 3634 CanAcceptPredicationCode = true; 3635 3636 if (isThumb()) { 3637 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3638 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3639 CanAcceptPredicationCode = false; 3640 } 3641 } 3642 3643 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 3644 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3645 // FIXME: This is all horribly hacky. We really need a better way to deal 3646 // with optional operands like this in the matcher table. 3647 3648 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3649 // another does not. Specifically, the MOVW instruction does not. So we 3650 // special case it here and remove the defaulted (non-setting) cc_out 3651 // operand if that's the instruction we're trying to match. 3652 // 3653 // We do this as post-processing of the explicit operands rather than just 3654 // conditionally adding the cc_out in the first place because we need 3655 // to check the type of the parsed immediate operand. 3656 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3657 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3658 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3659 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3660 return true; 3661 3662 // Register-register 'add' for thumb does not have a cc_out operand 3663 // when there are only two register operands. 
3664 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 3665 static_cast<ARMOperand*>(Operands[3])->isReg() && 3666 static_cast<ARMOperand*>(Operands[4])->isReg() && 3667 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3668 return true; 3669 // Register-register 'add' for thumb does not have a cc_out operand 3670 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 3671 // have to check the immediate range here since Thumb2 has a variant 3672 // that can handle a different range and has a cc_out operand. 3673 if (((isThumb() && Mnemonic == "add") || 3674 (isThumbTwo() && Mnemonic == "sub")) && 3675 Operands.size() == 6 && 3676 static_cast<ARMOperand*>(Operands[3])->isReg() && 3677 static_cast<ARMOperand*>(Operands[4])->isReg() && 3678 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 3679 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3680 (static_cast<ARMOperand*>(Operands[5])->isReg() || 3681 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 3682 return true; 3683 // For Thumb2, add/sub immediate does not have a cc_out operand for the 3684 // imm0_4095 variant. That's the least-preferred variant when 3685 // selecting via the generic "add" mnemonic, so to know that we 3686 // should remove the cc_out operand, we have to explicitly check that 3687 // it's not one of the other variants. Ugh. 3688 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 3689 Operands.size() == 6 && 3690 static_cast<ARMOperand*>(Operands[3])->isReg() && 3691 static_cast<ARMOperand*>(Operands[4])->isReg() && 3692 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3693 // Nest conditions rather than one big 'if' statement for readability. 3694 // 3695 // If either register is a high reg, it's either one of the SP 3696 // variants (handled above) or a 32-bit encoding, so we just 3697 // check against T3. 
3698 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3699 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 3700 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 3701 return false; 3702 // If both registers are low, we're in an IT block, and the immediate is 3703 // in range, we should use encoding T1 instead, which has a cc_out. 3704 if (inITBlock() && 3705 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 3706 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 3707 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 3708 return false; 3709 3710 // Otherwise, we use encoding T4, which does not have a cc_out 3711 // operand. 3712 return true; 3713 } 3714 3715 // The thumb2 multiply instruction doesn't have a CCOut register, so 3716 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 3717 // use the 16-bit encoding or not. 3718 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 3719 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3720 static_cast<ARMOperand*>(Operands[3])->isReg() && 3721 static_cast<ARMOperand*>(Operands[4])->isReg() && 3722 static_cast<ARMOperand*>(Operands[5])->isReg() && 3723 // If the registers aren't low regs, the destination reg isn't the 3724 // same as one of the source regs, or the cc_out operand is zero 3725 // outside of an IT block, we have to use the 32-bit encoding, so 3726 // remove the cc_out operand. 
3727 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3728 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 3729 !inITBlock() || 3730 (static_cast<ARMOperand*>(Operands[3])->getReg() != 3731 static_cast<ARMOperand*>(Operands[5])->getReg() && 3732 static_cast<ARMOperand*>(Operands[3])->getReg() != 3733 static_cast<ARMOperand*>(Operands[4])->getReg()))) 3734 return true; 3735 3736 3737 3738 // Register-register 'add/sub' for thumb does not have a cc_out operand 3739 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 3740 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 3741 // right, this will result in better diagnostics (which operand is off) 3742 // anyway. 3743 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 3744 (Operands.size() == 5 || Operands.size() == 6) && 3745 static_cast<ARMOperand*>(Operands[3])->isReg() && 3746 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 3747 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3748 return true; 3749 3750 return false; 3751 } 3752 3753 /// Parse an arm instruction mnemonic followed by its operands. 3754 bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 3755 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3756 // Create the leading tokens for the mnemonic, split by '.' characters. 3757 size_t Start = 0, Next = Name.find('.'); 3758 StringRef Mnemonic = Name.slice(Start, Next); 3759 3760 // Split out the predication code and carry setting flag from the mnemonic. 3761 unsigned PredicationCode; 3762 unsigned ProcessorIMod; 3763 bool CarrySetting; 3764 StringRef ITMask; 3765 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 3766 ProcessorIMod, ITMask); 3767 3768 // In Thumb1, only the branch (B) instruction can be predicated. 
3769 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 3770 Parser.EatToEndOfStatement(); 3771 return Error(NameLoc, "conditional execution not supported in Thumb1"); 3772 } 3773 3774 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 3775 3776 // Handle the IT instruction ITMask. Convert it to a bitmask. This 3777 // is the mask as it will be for the IT encoding if the conditional 3778 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 3779 // where the conditional bit0 is zero, the instruction post-processing 3780 // will adjust the mask accordingly. 3781 if (Mnemonic == "it") { 3782 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 3783 if (ITMask.size() > 3) { 3784 Parser.EatToEndOfStatement(); 3785 return Error(Loc, "too many conditions on IT instruction"); 3786 } 3787 unsigned Mask = 8; 3788 for (unsigned i = ITMask.size(); i != 0; --i) { 3789 char pos = ITMask[i - 1]; 3790 if (pos != 't' && pos != 'e') { 3791 Parser.EatToEndOfStatement(); 3792 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 3793 } 3794 Mask >>= 1; 3795 if (ITMask[i - 1] == 't') 3796 Mask |= 8; 3797 } 3798 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 3799 } 3800 3801 // FIXME: This is all a pretty gross hack. We should automatically handle 3802 // optional operands like this via tblgen. 3803 3804 // Next, add the CCOut and ConditionCode operands, if needed. 3805 // 3806 // For mnemonics which can ever incorporate a carry setting bit or predication 3807 // code, our matching model involves us always generating CCOut and 3808 // ConditionCode operands to match the mnemonic "as written" and then we let 3809 // the matcher deal with finding the right instruction or generating an 3810 // appropriate error. 
3811 bool CanAcceptCarrySet, CanAcceptPredicationCode; 3812 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 3813 3814 // If we had a carry-set on an instruction that can't do that, issue an 3815 // error. 3816 if (!CanAcceptCarrySet && CarrySetting) { 3817 Parser.EatToEndOfStatement(); 3818 return Error(NameLoc, "instruction '" + Mnemonic + 3819 "' can not set flags, but 's' suffix specified"); 3820 } 3821 // If we had a predication code on an instruction that can't do that, issue an 3822 // error. 3823 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 3824 Parser.EatToEndOfStatement(); 3825 return Error(NameLoc, "instruction '" + Mnemonic + 3826 "' is not predicable, but condition code specified"); 3827 } 3828 3829 // Add the carry setting operand, if necessary. 3830 if (CanAcceptCarrySet) { 3831 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 3832 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 3833 Loc)); 3834 } 3835 3836 // Add the predication code operand, if necessary. 3837 if (CanAcceptPredicationCode) { 3838 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 3839 CarrySetting); 3840 Operands.push_back(ARMOperand::CreateCondCode( 3841 ARMCC::CondCodes(PredicationCode), Loc)); 3842 } 3843 3844 // Add the processor imod operand, if necessary. 3845 if (ProcessorIMod) { 3846 Operands.push_back(ARMOperand::CreateImm( 3847 MCConstantExpr::Create(ProcessorIMod, getContext()), 3848 NameLoc, NameLoc)); 3849 } 3850 3851 // Add the remaining tokens in the mnemonic. 3852 while (Next != StringRef::npos) { 3853 Start = Next; 3854 Next = Name.find('.', Start + 1); 3855 StringRef ExtraToken = Name.slice(Start, Next); 3856 3857 // For now, we're only parsing Thumb1 (for the most part), so 3858 // just ignore ".n" qualifiers. We'll use them to restrict 3859 // matching when we do Thumb2. 
3860 if (ExtraToken != ".n") { 3861 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 3862 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 3863 } 3864 } 3865 3866 // Read the remaining operands. 3867 if (getLexer().isNot(AsmToken::EndOfStatement)) { 3868 // Read the first operand. 3869 if (parseOperand(Operands, Mnemonic)) { 3870 Parser.EatToEndOfStatement(); 3871 return true; 3872 } 3873 3874 while (getLexer().is(AsmToken::Comma)) { 3875 Parser.Lex(); // Eat the comma. 3876 3877 // Parse and remember the operand. 3878 if (parseOperand(Operands, Mnemonic)) { 3879 Parser.EatToEndOfStatement(); 3880 return true; 3881 } 3882 } 3883 } 3884 3885 if (getLexer().isNot(AsmToken::EndOfStatement)) { 3886 SMLoc Loc = getLexer().getLoc(); 3887 Parser.EatToEndOfStatement(); 3888 return Error(Loc, "unexpected token in argument list"); 3889 } 3890 3891 Parser.Lex(); // Consume the EndOfStatement 3892 3893 // Some instructions, mostly Thumb, have forms for the same mnemonic that 3894 // do and don't have a cc_out optional-def operand. With some spot-checks 3895 // of the operand list, we can figure out which variant we're trying to 3896 // parse and adjust accordingly before actually matching. We shouldn't ever 3897 // try to remove a cc_out operand that was explicitly set on the the 3898 // mnemonic, of course (CarrySetting == true). Reason number #317 the 3899 // table driven matcher doesn't fit well with the ARM instruction set. 3900 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 3901 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 3902 Operands.erase(Operands.begin() + 1); 3903 delete Op; 3904 } 3905 3906 // ARM mode 'blx' need special handling, as the register operand version 3907 // is predicable, but the label operand version is not. So, we can't rely 3908 // on the Mnemonic based checking to correctly figure out when to put 3909 // a k_CondCode operand in the list. 
If we're trying to match the label 3910 // version, remove the k_CondCode operand here. 3911 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 3912 static_cast<ARMOperand*>(Operands[2])->isImm()) { 3913 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 3914 Operands.erase(Operands.begin() + 1); 3915 delete Op; 3916 } 3917 3918 // The vector-compare-to-zero instructions have a literal token "#0" at 3919 // the end that comes to here as an immediate operand. Convert it to a 3920 // token to play nicely with the matcher. 3921 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" || 3922 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 && 3923 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3924 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 3925 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 3926 if (CE && CE->getValue() == 0) { 3927 Operands.erase(Operands.begin() + 5); 3928 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 3929 delete Op; 3930 } 3931 } 3932 // VCMP{E} does the same thing, but with a different operand count. 3933 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 && 3934 static_cast<ARMOperand*>(Operands[4])->isImm()) { 3935 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]); 3936 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 3937 if (CE && CE->getValue() == 0) { 3938 Operands.erase(Operands.begin() + 4); 3939 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 3940 delete Op; 3941 } 3942 } 3943 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the 3944 // end. Convert it to a token here. 
3945 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 3946 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3947 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 3948 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 3949 if (CE && CE->getValue() == 0) { 3950 Operands.erase(Operands.begin() + 5); 3951 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 3952 delete Op; 3953 } 3954 } 3955 3956 return false; 3957 } 3958 3959 // Validate context-sensitive operand constraints. 3960 3961 // return 'true' if register list contains non-low GPR registers, 3962 // 'false' otherwise. If Reg is in the register list or is HiReg, set 3963 // 'containsReg' to true. 3964 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 3965 unsigned HiReg, bool &containsReg) { 3966 containsReg = false; 3967 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 3968 unsigned OpReg = Inst.getOperand(i).getReg(); 3969 if (OpReg == Reg) 3970 containsReg = true; 3971 // Anything other than a low register isn't legal here. 3972 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 3973 return true; 3974 } 3975 return false; 3976 } 3977 3978 // Check if the specified regisgter is in the register list of the inst, 3979 // starting at the indicated operand number. 3980 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 3981 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 3982 unsigned OpReg = Inst.getOperand(i).getReg(); 3983 if (OpReg == Reg) 3984 return true; 3985 } 3986 return false; 3987 } 3988 3989 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around 3990 // the ARMInsts array) instead. Getting that here requires awkward 3991 // API changes, though. Better way? 
3992 namespace llvm { 3993 extern MCInstrDesc ARMInsts[]; 3994 } 3995 static MCInstrDesc &getInstDesc(unsigned Opcode) { 3996 return ARMInsts[Opcode]; 3997 } 3998 3999 // FIXME: We would really like to be able to tablegen'erate this. 4000 bool ARMAsmParser:: 4001 validateInstruction(MCInst &Inst, 4002 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4003 MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4004 SMLoc Loc = Operands[0]->getStartLoc(); 4005 // Check the IT block state first. 4006 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4007 // being allowed in IT blocks, but not being predicable. It just always 4008 // executes. 4009 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4010 unsigned bit = 1; 4011 if (ITState.FirstCond) 4012 ITState.FirstCond = false; 4013 else 4014 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4015 // The instruction must be predicable. 4016 if (!MCID.isPredicable()) 4017 return Error(Loc, "instructions in IT block must be predicable"); 4018 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4019 unsigned ITCond = bit ? ITState.Cond : 4020 ARMCC::getOppositeCondition(ITState.Cond); 4021 if (Cond != ITCond) { 4022 // Find the condition code Operand to get its SMLoc information. 4023 SMLoc CondLoc; 4024 for (unsigned i = 1; i < Operands.size(); ++i) 4025 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4026 CondLoc = Operands[i]->getStartLoc(); 4027 return Error(CondLoc, "incorrect condition in IT block; got '" + 4028 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4029 "', but expected '" + 4030 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4031 } 4032 // Check for non-'al' condition codes outside of the IT block. 
4033 } else if (isThumbTwo() && MCID.isPredicable() && 4034 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4035 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4036 Inst.getOpcode() != ARM::t2B) 4037 return Error(Loc, "predicated instructions must be in IT block"); 4038 4039 switch (Inst.getOpcode()) { 4040 case ARM::LDRD: 4041 case ARM::LDRD_PRE: 4042 case ARM::LDRD_POST: 4043 case ARM::LDREXD: { 4044 // Rt2 must be Rt + 1. 4045 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4046 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4047 if (Rt2 != Rt + 1) 4048 return Error(Operands[3]->getStartLoc(), 4049 "destination operands must be sequential"); 4050 return false; 4051 } 4052 case ARM::STRD: { 4053 // Rt2 must be Rt + 1. 4054 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4055 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4056 if (Rt2 != Rt + 1) 4057 return Error(Operands[3]->getStartLoc(), 4058 "source operands must be sequential"); 4059 return false; 4060 } 4061 case ARM::STRD_PRE: 4062 case ARM::STRD_POST: 4063 case ARM::STREXD: { 4064 // Rt2 must be Rt + 1. 4065 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4066 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4067 if (Rt2 != Rt + 1) 4068 return Error(Operands[3]->getStartLoc(), 4069 "source operands must be sequential"); 4070 return false; 4071 } 4072 case ARM::SBFX: 4073 case ARM::UBFX: { 4074 // width must be in range [1, 32-lsb] 4075 unsigned lsb = Inst.getOperand(2).getImm(); 4076 unsigned widthm1 = Inst.getOperand(3).getImm(); 4077 if (widthm1 >= 32 - lsb) 4078 return Error(Operands[5]->getStartLoc(), 4079 "bitfield width must be in range [1,32-lsb]"); 4080 return false; 4081 } 4082 case ARM::tLDMIA: { 4083 // If we're parsing Thumb2, the .w variant is available and handles 4084 // most cases that are normally illegal for a Thumb1 LDM 4085 // instruction. 
We'll make the transformation in processInstruction() 4086 // if necessary. 4087 // 4088 // Thumb LDM instructions are writeback iff the base register is not 4089 // in the register list. 4090 unsigned Rn = Inst.getOperand(0).getReg(); 4091 bool hasWritebackToken = 4092 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4093 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4094 bool listContainsBase; 4095 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4096 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4097 "registers must be in range r0-r7"); 4098 // If we should have writeback, then there should be a '!' token. 4099 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4100 return Error(Operands[2]->getStartLoc(), 4101 "writeback operator '!' expected"); 4102 // If we should not have writeback, there must not be a '!'. This is 4103 // true even for the 32-bit wide encodings. 4104 if (listContainsBase && hasWritebackToken) 4105 return Error(Operands[3]->getStartLoc(), 4106 "writeback operator '!' not allowed when base register " 4107 "in register list"); 4108 4109 break; 4110 } 4111 case ARM::t2LDMIA_UPD: { 4112 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4113 return Error(Operands[4]->getStartLoc(), 4114 "writeback operator '!' 
not allowed when base register " 4115 "in register list"); 4116 break; 4117 } 4118 case ARM::tPOP: { 4119 bool listContainsBase; 4120 if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase)) 4121 return Error(Operands[2]->getStartLoc(), 4122 "registers must be in range r0-r7 or pc"); 4123 break; 4124 } 4125 case ARM::tPUSH: { 4126 bool listContainsBase; 4127 if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase)) 4128 return Error(Operands[2]->getStartLoc(), 4129 "registers must be in range r0-r7 or lr"); 4130 break; 4131 } 4132 case ARM::tSTMIA_UPD: { 4133 bool listContainsBase; 4134 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4135 return Error(Operands[4]->getStartLoc(), 4136 "registers must be in range r0-r7"); 4137 break; 4138 } 4139 } 4140 4141 return false; 4142 } 4143 4144 void ARMAsmParser:: 4145 processInstruction(MCInst &Inst, 4146 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4147 switch (Inst.getOpcode()) { 4148 case ARM::LDMIA_UPD: 4149 // If this is a load of a single register via a 'pop', then we should use 4150 // a post-indexed LDR instruction instead, per the ARM ARM. 4151 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4152 Inst.getNumOperands() == 5) { 4153 MCInst TmpInst; 4154 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4155 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4156 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4157 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4158 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4159 TmpInst.addOperand(MCOperand::CreateImm(4)); 4160 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4161 TmpInst.addOperand(Inst.getOperand(3)); 4162 Inst = TmpInst; 4163 } 4164 break; 4165 case ARM::STMDB_UPD: 4166 // If this is a store of a single register via a 'push', then we should use 4167 // a pre-indexed STR instruction instead, per the ARM ARM. 
4168 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4169 Inst.getNumOperands() == 5) { 4170 MCInst TmpInst; 4171 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4172 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4173 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4174 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4175 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4176 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4177 TmpInst.addOperand(Inst.getOperand(3)); 4178 Inst = TmpInst; 4179 } 4180 break; 4181 case ARM::tADDi8: 4182 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4183 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4184 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4185 // to encoding T1 if <Rd> is omitted." 4186 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4187 Inst.setOpcode(ARM::tADDi3); 4188 break; 4189 case ARM::tSUBi8: 4190 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4191 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4192 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4193 // to encoding T1 if <Rd> is omitted." 4194 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4195 Inst.setOpcode(ARM::tSUBi3); 4196 break; 4197 case ARM::tB: 4198 // A Thumb conditional branch outside of an IT block is a tBcc. 4199 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4200 Inst.setOpcode(ARM::tBcc); 4201 break; 4202 case ARM::t2B: 4203 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4204 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4205 Inst.setOpcode(ARM::t2Bcc); 4206 break; 4207 case ARM::t2Bcc: 4208 // If the conditional is AL or we're in an IT block, we really want t2B. 
4209 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) 4210 Inst.setOpcode(ARM::t2B); 4211 break; 4212 case ARM::tBcc: 4213 // If the conditional is AL, we really want tB. 4214 if (Inst.getOperand(1).getImm() == ARMCC::AL) 4215 Inst.setOpcode(ARM::tB); 4216 break; 4217 case ARM::tLDMIA: { 4218 // If the register list contains any high registers, or if the writeback 4219 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4220 // instead if we're in Thumb2. Otherwise, this should have generated 4221 // an error in validateInstruction(). 4222 unsigned Rn = Inst.getOperand(0).getReg(); 4223 bool hasWritebackToken = 4224 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4225 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4226 bool listContainsBase; 4227 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4228 (!listContainsBase && !hasWritebackToken) || 4229 (listContainsBase && hasWritebackToken)) { 4230 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4231 assert (isThumbTwo()); 4232 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4233 // If we're switching to the updating version, we need to insert 4234 // the writeback tied operand. 4235 if (hasWritebackToken) 4236 Inst.insert(Inst.begin(), 4237 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4238 } 4239 break; 4240 } 4241 case ARM::tSTMIA_UPD: { 4242 // If the register list contains any high registers, we need to use 4243 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4244 // should have generated an error in validateInstruction(). 4245 unsigned Rn = Inst.getOperand(0).getReg(); 4246 bool listContainsBase; 4247 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4248 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 
4249 assert (isThumbTwo()); 4250 Inst.setOpcode(ARM::t2STMIA_UPD); 4251 } 4252 break; 4253 } 4254 case ARM::t2MOVi: { 4255 // If we can use the 16-bit encoding and the user didn't explicitly 4256 // request the 32-bit variant, transform it here. 4257 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4258 Inst.getOperand(1).getImm() <= 255 && 4259 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4260 Inst.getOperand(4).getReg() == ARM::CPSR) || 4261 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4262 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4263 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4264 // The operands aren't in the same order for tMOVi8... 4265 MCInst TmpInst; 4266 TmpInst.setOpcode(ARM::tMOVi8); 4267 TmpInst.addOperand(Inst.getOperand(0)); 4268 TmpInst.addOperand(Inst.getOperand(4)); 4269 TmpInst.addOperand(Inst.getOperand(1)); 4270 TmpInst.addOperand(Inst.getOperand(2)); 4271 TmpInst.addOperand(Inst.getOperand(3)); 4272 Inst = TmpInst; 4273 } 4274 break; 4275 } 4276 case ARM::t2MOVr: { 4277 // If we can use the 16-bit encoding and the user didn't explicitly 4278 // request the 32-bit variant, transform it here. 4279 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4280 isARMLowRegister(Inst.getOperand(1).getReg()) && 4281 Inst.getOperand(2).getImm() == ARMCC::AL && 4282 Inst.getOperand(4).getReg() == ARM::CPSR && 4283 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4284 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4285 // The operands aren't the same for tMOV[S]r... (no cc_out) 4286 MCInst TmpInst; 4287 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? 
ARM::tMOVSr : ARM::tMOVr); 4288 TmpInst.addOperand(Inst.getOperand(0)); 4289 TmpInst.addOperand(Inst.getOperand(1)); 4290 TmpInst.addOperand(Inst.getOperand(2)); 4291 TmpInst.addOperand(Inst.getOperand(3)); 4292 Inst = TmpInst; 4293 } 4294 break; 4295 } 4296 case ARM::t2SXTH: 4297 case ARM::t2SXTB: 4298 case ARM::t2UXTH: 4299 case ARM::t2UXTB: { 4300 // If we can use the 16-bit encoding and the user didn't explicitly 4301 // request the 32-bit variant, transform it here. 4302 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4303 isARMLowRegister(Inst.getOperand(1).getReg()) && 4304 Inst.getOperand(2).getImm() == 0 && 4305 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4306 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4307 unsigned NewOpc; 4308 switch (Inst.getOpcode()) { 4309 default: llvm_unreachable("Illegal opcode!"); 4310 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4311 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4312 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4313 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4314 } 4315 // The operands aren't the same for thumb1 (no rotate operand). 4316 MCInst TmpInst; 4317 TmpInst.setOpcode(NewOpc); 4318 TmpInst.addOperand(Inst.getOperand(0)); 4319 TmpInst.addOperand(Inst.getOperand(1)); 4320 TmpInst.addOperand(Inst.getOperand(3)); 4321 TmpInst.addOperand(Inst.getOperand(4)); 4322 Inst = TmpInst; 4323 } 4324 break; 4325 } 4326 case ARM::t2IT: { 4327 // The mask bits for all but the first condition are represented as 4328 // the low bit of the condition code value implies 't'. We currently 4329 // always have 1 implies 't', so XOR toggle the bits if the low bit 4330 // of the condition code is zero. 
The encoding also expects the low 4331 // bit of the condition to be encoded as bit 4 of the mask operand, 4332 // so mask that in if needed 4333 MCOperand &MO = Inst.getOperand(1); 4334 unsigned Mask = MO.getImm(); 4335 unsigned OrigMask = Mask; 4336 unsigned TZ = CountTrailingZeros_32(Mask); 4337 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4338 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4339 for (unsigned i = 3; i != TZ; --i) 4340 Mask ^= 1 << i; 4341 } else 4342 Mask |= 0x10; 4343 MO.setImm(Mask); 4344 4345 // Set up the IT block state according to the IT instruction we just 4346 // matched. 4347 assert(!inITBlock() && "nested IT blocks?!"); 4348 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4349 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4350 ITState.CurPosition = 0; 4351 ITState.FirstCond = true; 4352 break; 4353 } 4354 } 4355 } 4356 4357 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4358 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4359 // suffix depending on whether they're in an IT block or not. 4360 unsigned Opc = Inst.getOpcode(); 4361 MCInstrDesc &MCID = getInstDesc(Opc); 4362 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4363 assert(MCID.hasOptionalDef() && 4364 "optionally flag setting instruction missing optional def operand"); 4365 assert(MCID.NumOperands == Inst.getNumOperands() && 4366 "operand count mismatch!"); 4367 // Find the optional-def operand (cc_out). 4368 unsigned OpNo; 4369 for (OpNo = 0; 4370 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4371 ++OpNo) 4372 ; 4373 // If we're parsing Thumb1, reject it completely. 4374 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4375 return Match_MnemonicFail; 4376 // If we're parsing Thumb2, which form is legal depends on whether we're 4377 // in an IT block. 
4378 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4379 !inITBlock()) 4380 return Match_RequiresITBlock; 4381 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4382 inITBlock()) 4383 return Match_RequiresNotITBlock; 4384 } 4385 // Some high-register supporting Thumb1 encodings only allow both registers 4386 // to be from r0-r7 when in Thumb2. 4387 else if (Opc == ARM::tADDhirr && isThumbOne() && 4388 isARMLowRegister(Inst.getOperand(1).getReg()) && 4389 isARMLowRegister(Inst.getOperand(2).getReg())) 4390 return Match_RequiresThumb2; 4391 // Others only require ARMv6 or later. 4392 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4393 isARMLowRegister(Inst.getOperand(0).getReg()) && 4394 isARMLowRegister(Inst.getOperand(1).getReg())) 4395 return Match_RequiresV6; 4396 return Match_Success; 4397 } 4398 4399 bool ARMAsmParser:: 4400 MatchAndEmitInstruction(SMLoc IDLoc, 4401 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4402 MCStreamer &Out) { 4403 MCInst Inst; 4404 unsigned ErrorInfo; 4405 unsigned MatchResult; 4406 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4407 switch (MatchResult) { 4408 default: break; 4409 case Match_Success: 4410 // Context sensitive operand constraints aren't handled by the matcher, 4411 // so check them here. 4412 if (validateInstruction(Inst, Operands)) { 4413 // Still progress the IT block, otherwise one wrong condition causes 4414 // nasty cascading errors. 4415 forwardITPosition(); 4416 return true; 4417 } 4418 4419 // Some instructions need post-processing to, for example, tweak which 4420 // encoding is selected. 4421 processInstruction(Inst, Operands); 4422 4423 // Only move forward at the very end so that everything in validate 4424 // and process gets a consistent answer about whether we're in an IT 4425 // block. 
4426 forwardITPosition(); 4427 4428 Out.EmitInstruction(Inst); 4429 return false; 4430 case Match_MissingFeature: 4431 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 4432 return true; 4433 case Match_InvalidOperand: { 4434 SMLoc ErrorLoc = IDLoc; 4435 if (ErrorInfo != ~0U) { 4436 if (ErrorInfo >= Operands.size()) 4437 return Error(IDLoc, "too few operands for instruction"); 4438 4439 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 4440 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 4441 } 4442 4443 return Error(ErrorLoc, "invalid operand for instruction"); 4444 } 4445 case Match_MnemonicFail: 4446 return Error(IDLoc, "invalid instruction"); 4447 case Match_ConversionFail: 4448 // The converter function will have already emited a diagnostic. 4449 return true; 4450 case Match_RequiresNotITBlock: 4451 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 4452 case Match_RequiresITBlock: 4453 return Error(IDLoc, "instruction only valid inside IT block"); 4454 case Match_RequiresV6: 4455 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 4456 case Match_RequiresThumb2: 4457 return Error(IDLoc, "instruction variant requires Thumb2"); 4458 } 4459 4460 llvm_unreachable("Implement any new match types added!"); 4461 return true; 4462 } 4463 4464 /// parseDirective parses the arm specific directives 4465 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 4466 StringRef IDVal = DirectiveID.getIdentifier(); 4467 if (IDVal == ".word") 4468 return parseDirectiveWord(4, DirectiveID.getLoc()); 4469 else if (IDVal == ".thumb") 4470 return parseDirectiveThumb(DirectiveID.getLoc()); 4471 else if (IDVal == ".thumb_func") 4472 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 4473 else if (IDVal == ".code") 4474 return parseDirectiveCode(DirectiveID.getLoc()); 4475 else if (IDVal == ".syntax") 4476 return parseDirectiveSyntax(DirectiveID.getLoc()); 4477 return true; 4478 } 4479 4480 /// 
parseDirectiveWord 4481 /// ::= .word [ expression (, expression)* ] 4482 bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 4483 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4484 for (;;) { 4485 const MCExpr *Value; 4486 if (getParser().ParseExpression(Value)) 4487 return true; 4488 4489 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 4490 4491 if (getLexer().is(AsmToken::EndOfStatement)) 4492 break; 4493 4494 // FIXME: Improve diagnostic. 4495 if (getLexer().isNot(AsmToken::Comma)) 4496 return Error(L, "unexpected token in directive"); 4497 Parser.Lex(); 4498 } 4499 } 4500 4501 Parser.Lex(); 4502 return false; 4503 } 4504 4505 /// parseDirectiveThumb 4506 /// ::= .thumb 4507 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 4508 if (getLexer().isNot(AsmToken::EndOfStatement)) 4509 return Error(L, "unexpected token in directive"); 4510 Parser.Lex(); 4511 4512 // TODO: set thumb mode 4513 // TODO: tell the MC streamer the mode 4514 // getParser().getStreamer().Emit???(); 4515 return false; 4516 } 4517 4518 /// parseDirectiveThumbFunc 4519 /// ::= .thumbfunc symbol_name 4520 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 4521 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 4522 bool isMachO = MAI.hasSubsectionsViaSymbols(); 4523 StringRef Name; 4524 4525 // Darwin asm has function name after .thumb_func direction 4526 // ELF doesn't 4527 if (isMachO) { 4528 const AsmToken &Tok = Parser.getTok(); 4529 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 4530 return Error(L, "unexpected token in .thumb_func directive"); 4531 Name = Tok.getString(); 4532 Parser.Lex(); // Consume the identifier token. 
4533 } 4534 4535 if (getLexer().isNot(AsmToken::EndOfStatement)) 4536 return Error(L, "unexpected token in directive"); 4537 Parser.Lex(); 4538 4539 // FIXME: assuming function name will be the line following .thumb_func 4540 if (!isMachO) { 4541 Name = Parser.getTok().getString(); 4542 } 4543 4544 // Mark symbol as a thumb symbol. 4545 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4546 getParser().getStreamer().EmitThumbFunc(Func); 4547 return false; 4548 } 4549 4550 /// parseDirectiveSyntax 4551 /// ::= .syntax unified | divided 4552 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4553 const AsmToken &Tok = Parser.getTok(); 4554 if (Tok.isNot(AsmToken::Identifier)) 4555 return Error(L, "unexpected token in .syntax directive"); 4556 StringRef Mode = Tok.getString(); 4557 if (Mode == "unified" || Mode == "UNIFIED") 4558 Parser.Lex(); 4559 else if (Mode == "divided" || Mode == "DIVIDED") 4560 return Error(L, "'.syntax divided' arm asssembly not supported"); 4561 else 4562 return Error(L, "unrecognized syntax mode in .syntax directive"); 4563 4564 if (getLexer().isNot(AsmToken::EndOfStatement)) 4565 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4566 Parser.Lex(); 4567 4568 // TODO tell the MC streamer the mode 4569 // getParser().getStreamer().Emit???(); 4570 return false; 4571 } 4572 4573 /// parseDirectiveCode 4574 /// ::= .code 16 | 32 4575 bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4576 const AsmToken &Tok = Parser.getTok(); 4577 if (Tok.isNot(AsmToken::Integer)) 4578 return Error(L, "unexpected token in .code directive"); 4579 int64_t Val = Parser.getTok().getIntVal(); 4580 if (Val == 16) 4581 Parser.Lex(); 4582 else if (Val == 32) 4583 Parser.Lex(); 4584 else 4585 return Error(L, "invalid operand to .code directive"); 4586 4587 if (getLexer().isNot(AsmToken::EndOfStatement)) 4588 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4589 Parser.Lex(); 4590 4591 if (Val == 16) { 4592 if 
(!isThumb()) 4593 SwitchMode(); 4594 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4595 } else { 4596 if (isThumb()) 4597 SwitchMode(); 4598 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4599 } 4600 4601 return false; 4602 } 4603 4604 extern "C" void LLVMInitializeARMAsmLexer(); 4605 4606 /// Force static initialization. 4607 extern "C" void LLVMInitializeARMAsmParser() { 4608 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4609 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4610 LLVMInitializeARMAsmLexer(); 4611 } 4612 4613 #define GET_REGISTER_MATCHER 4614 #define GET_MATCHER_IMPLEMENTATION 4615 #include "ARMGenAsmMatcher.inc" 4616