      1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 
     10 #include "MCTargetDesc/AArch64AddressingModes.h"
     11 #include "MCTargetDesc/AArch64MCExpr.h"
     12 #include "MCTargetDesc/AArch64TargetStreamer.h"
     13 #include "Utils/AArch64BaseInfo.h"
     14 #include "llvm/ADT/APInt.h"
     15 #include "llvm/ADT/STLExtras.h"
     16 #include "llvm/ADT/SmallString.h"
     17 #include "llvm/ADT/SmallVector.h"
     18 #include "llvm/ADT/StringSwitch.h"
     19 #include "llvm/ADT/Twine.h"
     20 #include "llvm/MC/MCContext.h"
     21 #include "llvm/MC/MCExpr.h"
     22 #include "llvm/MC/MCInst.h"
     23 #include "llvm/MC/MCObjectFileInfo.h"
     24 #include "llvm/MC/MCParser/MCAsmLexer.h"
     25 #include "llvm/MC/MCParser/MCAsmParser.h"
     26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
     27 #include "llvm/MC/MCRegisterInfo.h"
     28 #include "llvm/MC/MCStreamer.h"
     29 #include "llvm/MC/MCSubtargetInfo.h"
     30 #include "llvm/MC/MCSymbol.h"
     31 #include "llvm/MC/MCTargetAsmParser.h"
     32 #include "llvm/Support/ErrorHandling.h"
     33 #include "llvm/Support/SourceMgr.h"
     34 #include "llvm/Support/TargetRegistry.h"
     35 #include "llvm/Support/raw_ostream.h"
     36 #include <cstdio>
     37 using namespace llvm;
     38 
     39 namespace {
     40 
     41 class AArch64Operand;
     42 
     43 class AArch64AsmParser : public MCTargetAsmParser {
     44 private:
     45   StringRef Mnemonic; ///< Instruction mnemonic.
     46 
      47   // Map of register aliases created via the .req directive.
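           // Each entry maps an alias name to (IsVector, RegisterNumber); presumably
           // maintained by the .req/.unreq directive handlers declared below.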
     48   StringMap<std::pair<bool, unsigned> > RegisterReqs;
     49 
     50   AArch64TargetStreamer &getTargetStreamer() {
     51     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
     52     return static_cast<AArch64TargetStreamer &>(TS);
     53   }
     54 
     55   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
     56 
     57   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
     58   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
     59   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
     60   unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
     61   int tryParseRegister();
     62   int tryMatchVectorRegister(StringRef &Kind, bool expected);
     63   bool parseRegister(OperandVector &Operands);
     64   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
     65   bool parseVectorList(OperandVector &Operands);
     66   bool parseOperand(OperandVector &Operands, bool isCondCode,
     67                     bool invertCondCode);
     68 
     69   void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
     70   bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
     71   bool showMatchError(SMLoc Loc, unsigned ErrCode);
     72 
     73   bool parseDirectiveWord(unsigned Size, SMLoc L);
     74   bool parseDirectiveInst(SMLoc L);
     75 
     76   bool parseDirectiveTLSDescCall(SMLoc L);
     77 
     78   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
     79   bool parseDirectiveLtorg(SMLoc L);
     80 
     81   bool parseDirectiveReq(StringRef Name, SMLoc L);
     82   bool parseDirectiveUnreq(SMLoc L);
     83 
     84   bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
     85   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     86                                OperandVector &Operands, MCStreamer &Out,
     87                                uint64_t &ErrorInfo,
     88                                bool MatchingInlineAsm) override;
     89 /// @name Auto-generated Match Functions
     90 /// {
     91 
     92 #define GET_ASSEMBLER_HEADER
     93 #include "AArch64GenAsmMatcher.inc"
     94 
     95   /// }
     96 
     97   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
     98   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
     99   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
    100   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
    101   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
    102   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
    103   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
    104   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
    105   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
    106   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
    107   OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
    108   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
    109   bool tryParseVectorRegister(OperandVector &Operands);
    110   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
    111 
    112 public:
    113   enum AArch64MatchResultTy {
    114     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
    115 #define GET_OPERAND_DIAGNOSTIC_TYPES
    116 #include "AArch64GenAsmMatcher.inc"
    117   };
    118   AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
    119                    const MCInstrInfo &MII, const MCTargetOptions &Options)
    120     : MCTargetAsmParser(Options, STI) {
    121     MCAsmParserExtension::Initialize(Parser);
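             // Install a default AArch64TargetStreamer if the streamer does not already
             // have one, so the target-streamer hooks used by this parser are always available.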
    122     MCStreamer &S = getParser().getStreamer();
    123     if (S.getTargetStreamer() == nullptr)
    124       new AArch64TargetStreamer(S);
    125 
    126     // Initialize the set of available features.
    127     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
    128   }
    129 
    130   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
    131                         SMLoc NameLoc, OperandVector &Operands) override;
    132   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
    133   bool ParseDirective(AsmToken DirectiveID) override;
    134   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
    135                                       unsigned Kind) override;
    136 
    137   static bool classifySymbolRef(const MCExpr *Expr,
    138                                 AArch64MCExpr::VariantKind &ELFRefKind,
    139                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
    140                                 int64_t &Addend);
    141 };
    142 } // end anonymous namespace
    143 
    144 namespace {
    145 
    146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
    147 /// instruction.
    148 class AArch64Operand : public MCParsedAsmOperand {
    149 private:
    150   enum KindTy {
    151     k_Immediate,
    152     k_ShiftedImm,
    153     k_CondCode,
    154     k_Register,
    155     k_VectorList,
    156     k_VectorIndex,
    157     k_Token,
    158     k_SysReg,
    159     k_SysCR,
    160     k_Prefetch,
    161     k_ShiftExtend,
    162     k_FPImm,
    163     k_Barrier,
    164     k_PSBHint,
    165   } Kind;
    166 
    167   SMLoc StartLoc, EndLoc;
    168 
    169   struct TokOp {
    170     const char *Data;
    171     unsigned Length;
    172     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
    173   };
    174 
    175   struct RegOp {
    176     unsigned RegNum;
    177     bool isVector;
    178   };
    179 
    180   struct VectorListOp {
    181     unsigned RegNum;
    182     unsigned Count;
    183     unsigned NumElements;
    184     unsigned ElementKind;
    185   };
    186 
    187   struct VectorIndexOp {
    188     unsigned Val;
    189   };
    190 
    191   struct ImmOp {
    192     const MCExpr *Val;
    193   };
    194 
    195   struct ShiftedImmOp {
    196     const MCExpr *Val;
    197     unsigned ShiftAmount;
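             // Left-shift amount applied to Val; isAddSubImm() below only accepts 0 or 12.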
    198   };
    199 
    200   struct CondCodeOp {
    201     AArch64CC::CondCode Code;
    202   };
    203 
    204   struct FPImmOp {
    205     unsigned Val; // Encoded 8-bit representation.
    206   };
    207 
    208   struct BarrierOp {
    209     unsigned Val; // Not the enum since not all values have names.
    210     const char *Data;
    211     unsigned Length;
    212   };
    213 
    214   struct SysRegOp {
    215     const char *Data;
    216     unsigned Length;
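             // Encodings of this name for the MRS, MSR, and MSR-immediate (PState) forms;
             // -1U marks a form that is not valid for this register name (see the
             // is*SystemRegister predicates below).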
    217     uint32_t MRSReg;
    218     uint32_t MSRReg;
    219     uint32_t PStateField;
    220   };
    221 
    222   struct SysCRImmOp {
    223     unsigned Val;
    224   };
    225 
    226   struct PrefetchOp {
    227     unsigned Val;
    228     const char *Data;
    229     unsigned Length;
    230   };
    231 
    232   struct PSBHintOp {
    233     unsigned Val;
    234     const char *Data;
    235     unsigned Length;
    236   };
    237 
    238   struct ShiftExtendOp {
    239     AArch64_AM::ShiftExtendType Type;
    240     unsigned Amount;
    241     bool HasExplicitAmount;
    242   };
    243 
    244   struct ExtendOp {
    245     unsigned Val;
    246   };
    247 
    248   union {
    249     struct TokOp Tok;
    250     struct RegOp Reg;
    251     struct VectorListOp VectorList;
    252     struct VectorIndexOp VectorIndex;
    253     struct ImmOp Imm;
    254     struct ShiftedImmOp ShiftedImm;
    255     struct CondCodeOp CondCode;
    256     struct FPImmOp FPImm;
    257     struct BarrierOp Barrier;
    258     struct SysRegOp SysReg;
    259     struct SysCRImmOp SysCRImm;
    260     struct PrefetchOp Prefetch;
    261     struct PSBHintOp PSBHint;
    262     struct ShiftExtendOp ShiftExtend;
    263   };
    264 
     265   // Keep the MCContext around as the MCExprs may need to be manipulated during
    266   // the add<>Operands() calls.
    267   MCContext &Ctx;
    268 
    269 public:
    270   AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
    271 
    272   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    273     Kind = o.Kind;
    274     StartLoc = o.StartLoc;
    275     EndLoc = o.EndLoc;
    276     switch (Kind) {
    277     case k_Token:
    278       Tok = o.Tok;
    279       break;
    280     case k_Immediate:
    281       Imm = o.Imm;
    282       break;
    283     case k_ShiftedImm:
    284       ShiftedImm = o.ShiftedImm;
    285       break;
    286     case k_CondCode:
    287       CondCode = o.CondCode;
    288       break;
    289     case k_FPImm:
    290       FPImm = o.FPImm;
    291       break;
    292     case k_Barrier:
    293       Barrier = o.Barrier;
    294       break;
    295     case k_Register:
    296       Reg = o.Reg;
    297       break;
    298     case k_VectorList:
    299       VectorList = o.VectorList;
    300       break;
    301     case k_VectorIndex:
    302       VectorIndex = o.VectorIndex;
    303       break;
    304     case k_SysReg:
    305       SysReg = o.SysReg;
    306       break;
    307     case k_SysCR:
    308       SysCRImm = o.SysCRImm;
    309       break;
    310     case k_Prefetch:
    311       Prefetch = o.Prefetch;
    312       break;
    313     case k_PSBHint:
    314       PSBHint = o.PSBHint;
    315       break;
    316     case k_ShiftExtend:
    317       ShiftExtend = o.ShiftExtend;
    318       break;
    319     }
    320   }
    321 
    322   /// getStartLoc - Get the location of the first token of this operand.
    323   SMLoc getStartLoc() const override { return StartLoc; }
    324   /// getEndLoc - Get the location of the last token of this operand.
    325   SMLoc getEndLoc() const override { return EndLoc; }
    326 
    327   StringRef getToken() const {
    328     assert(Kind == k_Token && "Invalid access!");
    329     return StringRef(Tok.Data, Tok.Length);
    330   }
    331 
    332   bool isTokenSuffix() const {
    333     assert(Kind == k_Token && "Invalid access!");
    334     return Tok.IsSuffix;
    335   }
    336 
    337   const MCExpr *getImm() const {
    338     assert(Kind == k_Immediate && "Invalid access!");
    339     return Imm.Val;
    340   }
    341 
    342   const MCExpr *getShiftedImmVal() const {
    343     assert(Kind == k_ShiftedImm && "Invalid access!");
    344     return ShiftedImm.Val;
    345   }
    346 
    347   unsigned getShiftedImmShift() const {
    348     assert(Kind == k_ShiftedImm && "Invalid access!");
    349     return ShiftedImm.ShiftAmount;
    350   }
    351 
    352   AArch64CC::CondCode getCondCode() const {
    353     assert(Kind == k_CondCode && "Invalid access!");
    354     return CondCode.Code;
    355   }
    356 
    357   unsigned getFPImm() const {
    358     assert(Kind == k_FPImm && "Invalid access!");
    359     return FPImm.Val;
    360   }
    361 
    362   unsigned getBarrier() const {
    363     assert(Kind == k_Barrier && "Invalid access!");
    364     return Barrier.Val;
    365   }
    366 
    367   StringRef getBarrierName() const {
    368     assert(Kind == k_Barrier && "Invalid access!");
    369     return StringRef(Barrier.Data, Barrier.Length);
    370   }
    371 
    372   unsigned getReg() const override {
    373     assert(Kind == k_Register && "Invalid access!");
    374     return Reg.RegNum;
    375   }
    376 
    377   unsigned getVectorListStart() const {
    378     assert(Kind == k_VectorList && "Invalid access!");
    379     return VectorList.RegNum;
    380   }
    381 
    382   unsigned getVectorListCount() const {
    383     assert(Kind == k_VectorList && "Invalid access!");
    384     return VectorList.Count;
    385   }
    386 
    387   unsigned getVectorIndex() const {
    388     assert(Kind == k_VectorIndex && "Invalid access!");
    389     return VectorIndex.Val;
    390   }
    391 
    392   StringRef getSysReg() const {
    393     assert(Kind == k_SysReg && "Invalid access!");
    394     return StringRef(SysReg.Data, SysReg.Length);
    395   }
    396 
    397   unsigned getSysCR() const {
    398     assert(Kind == k_SysCR && "Invalid access!");
    399     return SysCRImm.Val;
    400   }
    401 
    402   unsigned getPrefetch() const {
    403     assert(Kind == k_Prefetch && "Invalid access!");
    404     return Prefetch.Val;
    405   }
    406 
    407   unsigned getPSBHint() const {
    408     assert(Kind == k_PSBHint && "Invalid access!");
    409     return PSBHint.Val;
    410   }
    411 
    412   StringRef getPSBHintName() const {
    413     assert(Kind == k_PSBHint && "Invalid access!");
    414     return StringRef(PSBHint.Data, PSBHint.Length);
    415   }
    416 
    417   StringRef getPrefetchName() const {
    418     assert(Kind == k_Prefetch && "Invalid access!");
    419     return StringRef(Prefetch.Data, Prefetch.Length);
    420   }
    421 
    422   AArch64_AM::ShiftExtendType getShiftExtendType() const {
    423     assert(Kind == k_ShiftExtend && "Invalid access!");
    424     return ShiftExtend.Type;
    425   }
    426 
    427   unsigned getShiftExtendAmount() const {
    428     assert(Kind == k_ShiftExtend && "Invalid access!");
    429     return ShiftExtend.Amount;
    430   }
    431 
    432   bool hasShiftExtendAmount() const {
    433     assert(Kind == k_ShiftExtend && "Invalid access!");
    434     return ShiftExtend.HasExplicitAmount;
    435   }
    436 
    437   bool isImm() const override { return Kind == k_Immediate; }
    438   bool isMem() const override { return false; }
    439   bool isSImm9() const {
    440     if (!isImm())
    441       return false;
    442     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    443     if (!MCE)
    444       return false;
    445     int64_t Val = MCE->getValue();
    446     return (Val >= -256 && Val < 256);
    447   }
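           // isSImm7s4/s8/s16 - signed 7-bit immediates scaled by 4, 8, or 16 bytes
           // (the paired load/store offset forms; instruction usage assumed here).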
    448   bool isSImm7s4() const {
    449     if (!isImm())
    450       return false;
    451     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    452     if (!MCE)
    453       return false;
    454     int64_t Val = MCE->getValue();
    455     return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
    456   }
    457   bool isSImm7s8() const {
    458     if (!isImm())
    459       return false;
    460     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    461     if (!MCE)
    462       return false;
    463     int64_t Val = MCE->getValue();
    464     return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
    465   }
    466   bool isSImm7s16() const {
    467     if (!isImm())
    468       return false;
    469     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    470     if (!MCE)
    471       return false;
    472     int64_t Val = MCE->getValue();
    473     return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
    474   }
    475 
    476   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    477     AArch64MCExpr::VariantKind ELFRefKind;
    478     MCSymbolRefExpr::VariantKind DarwinRefKind;
    479     int64_t Addend;
    480     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
    481                                            Addend)) {
    482       // If we don't understand the expression, assume the best and
    483       // let the fixup and relocation code deal with it.
    484       return true;
    485     }
    486 
    487     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
    488         ELFRefKind == AArch64MCExpr::VK_LO12 ||
    489         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
    490         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
    491         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
    492         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
    493         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
    494         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
    495         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
    496       // Note that we don't range-check the addend. It's adjusted modulo page
    497       // size when converted, so there is no "out of range" condition when using
    498       // @pageoff.
    499       return Addend >= 0 && (Addend % Scale) == 0;
    500     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
    501                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
    502       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
    503       return Addend == 0;
    504     }
    505 
    506     return false;
    507   }
    508 
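           // A UImm12 offset is an unsigned, Scale-aligned byte offset that still fits
           // in 12 bits once divided by Scale (i.e. 0 .. 4095 * Scale).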
    509   template <int Scale> bool isUImm12Offset() const {
    510     if (!isImm())
    511       return false;
    512 
    513     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    514     if (!MCE)
    515       return isSymbolicUImm12Offset(getImm(), Scale);
    516 
    517     int64_t Val = MCE->getValue();
    518     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
    519   }
    520 
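           // isImmN_M - the immediate is a constant in the inclusive range [N, M];
           // non-constant expressions are rejected by these checks.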
    521   bool isImm0_1() const {
    522     if (!isImm())
    523       return false;
    524     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    525     if (!MCE)
    526       return false;
    527     int64_t Val = MCE->getValue();
    528     return (Val >= 0 && Val < 2);
    529   }
    530   bool isImm0_7() const {
    531     if (!isImm())
    532       return false;
    533     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    534     if (!MCE)
    535       return false;
    536     int64_t Val = MCE->getValue();
    537     return (Val >= 0 && Val < 8);
    538   }
    539   bool isImm1_8() const {
    540     if (!isImm())
    541       return false;
    542     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    543     if (!MCE)
    544       return false;
    545     int64_t Val = MCE->getValue();
    546     return (Val > 0 && Val < 9);
    547   }
    548   bool isImm0_15() const {
    549     if (!isImm())
    550       return false;
    551     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    552     if (!MCE)
    553       return false;
    554     int64_t Val = MCE->getValue();
    555     return (Val >= 0 && Val < 16);
    556   }
    557   bool isImm1_16() const {
    558     if (!isImm())
    559       return false;
    560     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    561     if (!MCE)
    562       return false;
    563     int64_t Val = MCE->getValue();
    564     return (Val > 0 && Val < 17);
    565   }
    566   bool isImm0_31() const {
    567     if (!isImm())
    568       return false;
    569     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    570     if (!MCE)
    571       return false;
    572     int64_t Val = MCE->getValue();
    573     return (Val >= 0 && Val < 32);
    574   }
    575   bool isImm1_31() const {
    576     if (!isImm())
    577       return false;
    578     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    579     if (!MCE)
    580       return false;
    581     int64_t Val = MCE->getValue();
    582     return (Val >= 1 && Val < 32);
    583   }
    584   bool isImm1_32() const {
    585     if (!isImm())
    586       return false;
    587     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    588     if (!MCE)
    589       return false;
    590     int64_t Val = MCE->getValue();
    591     return (Val >= 1 && Val < 33);
    592   }
    593   bool isImm0_63() const {
    594     if (!isImm())
    595       return false;
    596     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    597     if (!MCE)
    598       return false;
    599     int64_t Val = MCE->getValue();
    600     return (Val >= 0 && Val < 64);
    601   }
    602   bool isImm1_63() const {
    603     if (!isImm())
    604       return false;
    605     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    606     if (!MCE)
    607       return false;
    608     int64_t Val = MCE->getValue();
    609     return (Val >= 1 && Val < 64);
    610   }
    611   bool isImm1_64() const {
    612     if (!isImm())
    613       return false;
    614     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    615     if (!MCE)
    616       return false;
    617     int64_t Val = MCE->getValue();
    618     return (Val >= 1 && Val < 65);
    619   }
    620   bool isImm0_127() const {
    621     if (!isImm())
    622       return false;
    623     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    624     if (!MCE)
    625       return false;
    626     int64_t Val = MCE->getValue();
    627     return (Val >= 0 && Val < 128);
    628   }
    629   bool isImm0_255() const {
    630     if (!isImm())
    631       return false;
    632     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    633     if (!MCE)
    634       return false;
    635     int64_t Val = MCE->getValue();
    636     return (Val >= 0 && Val < 256);
    637   }
    638   bool isImm0_65535() const {
    639     if (!isImm())
    640       return false;
    641     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    642     if (!MCE)
    643       return false;
    644     int64_t Val = MCE->getValue();
    645     return (Val >= 0 && Val < 65536);
    646   }
    647   bool isImm32_63() const {
    648     if (!isImm())
    649       return false;
    650     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    651     if (!MCE)
    652       return false;
    653     int64_t Val = MCE->getValue();
    654     return (Val >= 32 && Val < 64);
    655   }
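           // The logical-immediate predicates check whether the value (or, for the
           // *Not forms, its bitwise complement) is encodable as an AArch64 bitmask
           // immediate of the given width.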
    656   bool isLogicalImm32() const {
    657     if (!isImm())
    658       return false;
    659     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    660     if (!MCE)
    661       return false;
    662     int64_t Val = MCE->getValue();
    663     if (Val >> 32 != 0 && Val >> 32 != ~0LL)
    664       return false;
    665     Val &= 0xFFFFFFFF;
    666     return AArch64_AM::isLogicalImmediate(Val, 32);
    667   }
    668   bool isLogicalImm64() const {
    669     if (!isImm())
    670       return false;
    671     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    672     if (!MCE)
    673       return false;
    674     return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
    675   }
    676   bool isLogicalImm32Not() const {
    677     if (!isImm())
    678       return false;
    679     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    680     if (!MCE)
    681       return false;
    682     int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    683     return AArch64_AM::isLogicalImmediate(Val, 32);
    684   }
    685   bool isLogicalImm64Not() const {
    686     if (!isImm())
    687       return false;
    688     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    689     if (!MCE)
    690       return false;
    691     return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
    692   }
    693   bool isShiftedImm() const { return Kind == k_ShiftedImm; }
    694   bool isAddSubImm() const {
    695     if (!isShiftedImm() && !isImm())
    696       return false;
    697 
    698     const MCExpr *Expr;
    699 
    700     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    701     if (isShiftedImm()) {
    702       unsigned Shift = ShiftedImm.ShiftAmount;
    703       Expr = ShiftedImm.Val;
    704       if (Shift != 0 && Shift != 12)
    705         return false;
    706     } else {
    707       Expr = getImm();
    708     }
    709 
    710     AArch64MCExpr::VariantKind ELFRefKind;
    711     MCSymbolRefExpr::VariantKind DarwinRefKind;
    712     int64_t Addend;
    713     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
    714                                           DarwinRefKind, Addend)) {
    715       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
    716           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
    717           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
    718           || ELFRefKind == AArch64MCExpr::VK_LO12
    719           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
    720           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
    721           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
    722           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
    723           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
    724           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
    725           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    726     }
    727 
    728     // Otherwise it should be a real immediate in range:
    729     const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    730     return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
    731   }
    732   bool isAddSubImmNeg() const {
    733     if (!isShiftedImm() && !isImm())
    734       return false;
    735 
    736     const MCExpr *Expr;
    737 
    738     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    739     if (isShiftedImm()) {
    740       unsigned Shift = ShiftedImm.ShiftAmount;
    741       Expr = ShiftedImm.Val;
    742       if (Shift != 0 && Shift != 12)
    743         return false;
    744     } else
    745       Expr = getImm();
    746 
    747     // Otherwise it should be a real negative immediate in range:
    748     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    749     return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
    750   }
    751   bool isCondCode() const { return Kind == k_CondCode; }
    752   bool isSIMDImmType10() const {
    753     if (!isImm())
    754       return false;
    755     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    756     if (!MCE)
    757       return false;
    758     return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
    759   }
    760   bool isBranchTarget26() const {
    761     if (!isImm())
    762       return false;
    763     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    764     if (!MCE)
    765       return true;
    766     int64_t Val = MCE->getValue();
    767     if (Val & 0x3)
    768       return false;
    769     return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
    770   }
    771   bool isPCRelLabel19() const {
    772     if (!isImm())
    773       return false;
    774     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    775     if (!MCE)
    776       return true;
    777     int64_t Val = MCE->getValue();
    778     if (Val & 0x3)
    779       return false;
    780     return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
    781   }
    782   bool isBranchTarget14() const {
    783     if (!isImm())
    784       return false;
    785     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    786     if (!MCE)
    787       return true;
    788     int64_t Val = MCE->getValue();
    789     if (Val & 0x3)
    790       return false;
    791     return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
    792   }
    793 
    794   bool
    795   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    796     if (!isImm())
    797       return false;
    798 
    799     AArch64MCExpr::VariantKind ELFRefKind;
    800     MCSymbolRefExpr::VariantKind DarwinRefKind;
    801     int64_t Addend;
    802     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
    803                                              DarwinRefKind, Addend)) {
    804       return false;
    805     }
    806     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
    807       return false;
    808 
    809     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
    810       if (ELFRefKind == AllowedModifiers[i])
    811         return Addend == 0;
    812     }
    813 
    814     return false;
    815   }
    816 
    817   bool isMovZSymbolG3() const {
    818     return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
    819   }
    820 
    821   bool isMovZSymbolG2() const {
    822     return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
    823                          AArch64MCExpr::VK_TPREL_G2,
    824                          AArch64MCExpr::VK_DTPREL_G2});
    825   }
    826 
    827   bool isMovZSymbolG1() const {
    828     return isMovWSymbol({
    829         AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
    830         AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
    831         AArch64MCExpr::VK_DTPREL_G1,
    832     });
    833   }
    834 
    835   bool isMovZSymbolG0() const {
    836     return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
    837                          AArch64MCExpr::VK_TPREL_G0,
    838                          AArch64MCExpr::VK_DTPREL_G0});
    839   }
    840 
    841   bool isMovKSymbolG3() const {
    842     return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
    843   }
    844 
    845   bool isMovKSymbolG2() const {
    846     return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
    847   }
    848 
    849   bool isMovKSymbolG1() const {
    850     return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
    851                          AArch64MCExpr::VK_TPREL_G1_NC,
    852                          AArch64MCExpr::VK_DTPREL_G1_NC});
    853   }
    854 
    855   bool isMovKSymbolG0() const {
    856     return isMovWSymbol(
    857         {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
    858          AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
    859   }
    860 
    861   template<int RegWidth, int Shift>
    862   bool isMOVZMovAlias() const {
    863     if (!isImm()) return false;
    864 
    865     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    866     if (!CE) return false;
    867     uint64_t Value = CE->getValue();
    868 
    869     if (RegWidth == 32)
    870       Value &= 0xffffffffULL;
    871 
    872     // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    873     if (Value == 0 && Shift != 0)
    874       return false;
    875 
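             // Accept the value only if it is a single 16-bit chunk positioned exactly at
             // Shift, e.g. 0x12340000 for Shift == 16 (example value chosen for illustration).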
    876     return (Value & ~(0xffffULL << Shift)) == 0;
    877   }
    878 
    879   template<int RegWidth, int Shift>
    880   bool isMOVNMovAlias() const {
    881     if (!isImm()) return false;
    882 
    883     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    884     if (!CE) return false;
    885     uint64_t Value = CE->getValue();
    886 
    887     // MOVZ takes precedence over MOVN.
    888     for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
    889       if ((Value & ~(0xffffULL << MOVZShift)) == 0)
    890         return false;
    891 
    892     Value = ~Value;
    893     if (RegWidth == 32)
    894       Value &= 0xffffffffULL;
    895 
    896     return (Value & ~(0xffffULL << Shift)) == 0;
    897   }
    898 
    899   bool isFPImm() const { return Kind == k_FPImm; }
    900   bool isBarrier() const { return Kind == k_Barrier; }
    901   bool isSysReg() const { return Kind == k_SysReg; }
    902   bool isMRSSystemRegister() const {
    903     if (!isSysReg()) return false;
    904 
    905     return SysReg.MRSReg != -1U;
    906   }
    907   bool isMSRSystemRegister() const {
    908     if (!isSysReg()) return false;
    909     return SysReg.MSRReg != -1U;
    910   }
    911   bool isSystemPStateFieldWithImm0_1() const {
    912     if (!isSysReg()) return false;
    913     return (SysReg.PStateField == AArch64PState::PAN ||
    914             SysReg.PStateField == AArch64PState::UAO);
    915   }
    916   bool isSystemPStateFieldWithImm0_15() const {
    917     if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    918     return SysReg.PStateField != -1U;
    919   }
    920   bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
    921   bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
    922   bool isVectorRegLo() const {
    923     return Kind == k_Register && Reg.isVector &&
    924            AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
    925                Reg.RegNum);
    926   }
    927   bool isGPR32as64() const {
    928     return Kind == k_Register && !Reg.isVector &&
    929       AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
    930   }
    931   bool isWSeqPair() const {
    932     return Kind == k_Register && !Reg.isVector &&
    933            AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
    934                Reg.RegNum);
    935   }
    936   bool isXSeqPair() const {
    937     return Kind == k_Register && !Reg.isVector &&
    938            AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
    939                Reg.RegNum);
    940   }
    941 
    942   bool isGPR64sp0() const {
    943     return Kind == k_Register && !Reg.isVector &&
    944       AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
    945   }
    946 
    947   /// Is this a vector list with the type implicit (presumably attached to the
    948   /// instruction itself)?
    949   template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    950     return Kind == k_VectorList && VectorList.Count == NumRegs &&
    951            !VectorList.ElementKind;
    952   }
    953 
    954   template <unsigned NumRegs, unsigned NumElements, char ElementKind>
    955   bool isTypedVectorList() const {
    956     if (Kind != k_VectorList)
    957       return false;
    958     if (VectorList.Count != NumRegs)
    959       return false;
    960     if (VectorList.ElementKind != ElementKind)
    961       return false;
    962     return VectorList.NumElements == NumElements;
    963   }
    964 
    965   bool isVectorIndex1() const {
    966     return Kind == k_VectorIndex && VectorIndex.Val == 1;
    967   }
    968   bool isVectorIndexB() const {
    969     return Kind == k_VectorIndex && VectorIndex.Val < 16;
    970   }
    971   bool isVectorIndexH() const {
    972     return Kind == k_VectorIndex && VectorIndex.Val < 8;
    973   }
    974   bool isVectorIndexS() const {
    975     return Kind == k_VectorIndex && VectorIndex.Val < 4;
    976   }
    977   bool isVectorIndexD() const {
    978     return Kind == k_VectorIndex && VectorIndex.Val < 2;
    979   }
    980   bool isToken() const override { return Kind == k_Token; }
    981   bool isTokenEqual(StringRef Str) const {
    982     return Kind == k_Token && getToken() == Str;
    983   }
    984   bool isSysCR() const { return Kind == k_SysCR; }
    985   bool isPrefetch() const { return Kind == k_Prefetch; }
    986   bool isPSBHint() const { return Kind == k_PSBHint; }
    987   bool isShiftExtend() const { return Kind == k_ShiftExtend; }
    988   bool isShifter() const {
    989     if (!isShiftExtend())
    990       return false;
    991 
    992     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    993     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
    994             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
    995             ST == AArch64_AM::MSL);
    996   }
    997   bool isExtend() const {
    998     if (!isShiftExtend())
    999       return false;
   1000 
   1001     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1002     return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
   1003             ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
   1004             ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
   1005             ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
   1006             ET == AArch64_AM::LSL) &&
   1007            getShiftExtendAmount() <= 4;
   1008   }
   1009 
   1010   bool isExtend64() const {
   1011     if (!isExtend())
   1012       return false;
   1013     // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
   1014     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1015     return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
   1016   }
   1017   bool isExtendLSL64() const {
   1018     if (!isExtend())
   1019       return false;
   1020     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1021     return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
   1022             ET == AArch64_AM::LSL) &&
   1023            getShiftExtendAmount() <= 4;
   1024   }
   1025 
   1026   template<int Width> bool isMemXExtend() const {
   1027     if (!isExtend())
   1028       return false;
   1029     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1030     return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
   1031            (getShiftExtendAmount() == Log2_32(Width / 8) ||
   1032             getShiftExtendAmount() == 0);
   1033   }
   1034 
   1035   template<int Width> bool isMemWExtend() const {
   1036     if (!isExtend())
   1037       return false;
   1038     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1039     return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
   1040            (getShiftExtendAmount() == Log2_32(Width / 8) ||
   1041             getShiftExtendAmount() == 0);
   1042   }
   1043 
   1044   template <unsigned width>
   1045   bool isArithmeticShifter() const {
   1046     if (!isShifter())
   1047       return false;
   1048 
   1049     // An arithmetic shifter is LSL, LSR, or ASR.
   1050     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1051     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
   1052             ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
   1053   }
   1054 
   1055   template <unsigned width>
   1056   bool isLogicalShifter() const {
   1057     if (!isShifter())
   1058       return false;
   1059 
   1060     // A logical shifter is LSL, LSR, ASR or ROR.
   1061     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1062     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
   1063             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
   1064            getShiftExtendAmount() < width;
   1065   }
   1066 
   1067   bool isMovImm32Shifter() const {
   1068     if (!isShifter())
   1069       return false;
   1070 
    1071     // A MOVi shifter is LSL of 0 or 16.
   1072     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1073     if (ST != AArch64_AM::LSL)
   1074       return false;
   1075     uint64_t Val = getShiftExtendAmount();
   1076     return (Val == 0 || Val == 16);
   1077   }
   1078 
   1079   bool isMovImm64Shifter() const {
   1080     if (!isShifter())
   1081       return false;
   1082 
    1083     // A MOVi shifter is LSL of 0, 16, 32, or 48.
   1084     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1085     if (ST != AArch64_AM::LSL)
   1086       return false;
   1087     uint64_t Val = getShiftExtendAmount();
   1088     return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
   1089   }
   1090 
   1091   bool isLogicalVecShifter() const {
   1092     if (!isShifter())
   1093       return false;
   1094 
   1095     // A logical vector shifter is a left shift by 0, 8, 16, or 24.
   1096     unsigned Shift = getShiftExtendAmount();
   1097     return getShiftExtendType() == AArch64_AM::LSL &&
   1098            (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
   1099   }
   1100 
   1101   bool isLogicalVecHalfWordShifter() const {
   1102     if (!isLogicalVecShifter())
   1103       return false;
   1104 
    1105     // A logical vector half-word shifter is a left shift by 0 or 8.
   1106     unsigned Shift = getShiftExtendAmount();
   1107     return getShiftExtendType() == AArch64_AM::LSL &&
   1108            (Shift == 0 || Shift == 8);
   1109   }
   1110 
   1111   bool isMoveVecShifter() const {
   1112     if (!isShiftExtend())
   1113       return false;
   1114 
    1115     // A move vector shifter is an MSL shift of 8 or 16.
   1116     unsigned Shift = getShiftExtendAmount();
   1117     return getShiftExtendType() == AArch64_AM::MSL &&
   1118            (Shift == 8 || Shift == 16);
   1119   }
   1120 
   1121   // Fallback unscaled operands are for aliases of LDR/STR that fall back
   1122   // to LDUR/STUR when the offset is not legal for the former but is for
   1123   // the latter. As such, in addition to checking for being a legal unscaled
   1124   // address, also check that it is not a legal scaled address. This avoids
   1125   // ambiguity in the matcher.
   1126   template<int Width>
   1127   bool isSImm9OffsetFB() const {
   1128     return isSImm9() && !isUImm12Offset<Width / 8>();
   1129   }
   1130 
   1131   bool isAdrpLabel() const {
   1132     // Validation was handled during parsing, so we just sanity check that
   1133     // something didn't go haywire.
   1134     if (!isImm())
   1135         return false;
   1136 
   1137     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
   1138       int64_t Val = CE->getValue();
   1139       int64_t Min = - (4096 * (1LL << (21 - 1)));
   1140       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
   1141       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
   1142     }
   1143 
   1144     return true;
   1145   }
   1146 
   1147   bool isAdrLabel() const {
   1148     // Validation was handled during parsing, so we just sanity check that
   1149     // something didn't go haywire.
   1150     if (!isImm())
   1151         return false;
   1152 
   1153     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
   1154       int64_t Val = CE->getValue();
   1155       int64_t Min = - (1LL << (21 - 1));
   1156       int64_t Max = ((1LL << (21 - 1)) - 1);
   1157       return Val >= Min && Val <= Max;
   1158     }
   1159 
   1160     return true;
   1161   }
   1162 
   1163   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
   1164     // Add as immediates when possible.  Null MCExpr = 0.
   1165     if (!Expr)
   1166       Inst.addOperand(MCOperand::createImm(0));
   1167     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
   1168       Inst.addOperand(MCOperand::createImm(CE->getValue()));
   1169     else
   1170       Inst.addOperand(MCOperand::createExpr(Expr));
   1171   }
   1172 
   1173   void addRegOperands(MCInst &Inst, unsigned N) const {
   1174     assert(N == 1 && "Invalid number of operands!");
   1175     Inst.addOperand(MCOperand::createReg(getReg()));
   1176   }
   1177 
   1178   void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
   1179     assert(N == 1 && "Invalid number of operands!");
   1180     assert(
   1181         AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
   1182 
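             // Translate the 64-bit GPR to the 32-bit register with the same encoding,
             // presumably because the matched instruction form encodes a W register.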
   1183     const MCRegisterInfo *RI = Ctx.getRegisterInfo();
   1184     uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
   1185         RI->getEncodingValue(getReg()));
   1186 
   1187     Inst.addOperand(MCOperand::createReg(Reg));
   1188   }
   1189 
   1190   void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
   1191     assert(N == 1 && "Invalid number of operands!");
   1192     assert(
   1193         AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
   1194     Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
   1195   }
   1196 
   1197   void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
   1198     assert(N == 1 && "Invalid number of operands!");
   1199     assert(
   1200         AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
   1201     Inst.addOperand(MCOperand::createReg(getReg()));
   1202   }
   1203 
   1204   void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
   1205     assert(N == 1 && "Invalid number of operands!");
   1206     Inst.addOperand(MCOperand::createReg(getReg()));
   1207   }
   1208 
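           // Vector-list operands record the first register of the list as a Q register;
           // these helpers rebase it onto the D- or Q-tuple register classes by offsetting
           // from AArch64::Q0, using the list length to pick the starting tuple register.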
   1209   template <unsigned NumRegs>
   1210   void addVectorList64Operands(MCInst &Inst, unsigned N) const {
   1211     assert(N == 1 && "Invalid number of operands!");
   1212     static const unsigned FirstRegs[] = { AArch64::D0,
   1213                                           AArch64::D0_D1,
   1214                                           AArch64::D0_D1_D2,
   1215                                           AArch64::D0_D1_D2_D3 };
   1216     unsigned FirstReg = FirstRegs[NumRegs - 1];
   1217 
   1218     Inst.addOperand(
   1219         MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
   1220   }
   1221 
   1222   template <unsigned NumRegs>
   1223   void addVectorList128Operands(MCInst &Inst, unsigned N) const {
   1224     assert(N == 1 && "Invalid number of operands!");
   1225     static const unsigned FirstRegs[] = { AArch64::Q0,
   1226                                           AArch64::Q0_Q1,
   1227                                           AArch64::Q0_Q1_Q2,
   1228                                           AArch64::Q0_Q1_Q2_Q3 };
   1229     unsigned FirstReg = FirstRegs[NumRegs - 1];
   1230 
   1231     Inst.addOperand(
   1232         MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
   1233   }
   1234 
   1235   void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
   1236     assert(N == 1 && "Invalid number of operands!");
   1237     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1238   }
   1239 
   1240   void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
   1241     assert(N == 1 && "Invalid number of operands!");
   1242     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1243   }
   1244 
   1245   void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
   1246     assert(N == 1 && "Invalid number of operands!");
   1247     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1248   }
   1249 
   1250   void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
   1251     assert(N == 1 && "Invalid number of operands!");
   1252     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1253   }
   1254 
   1255   void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
   1256     assert(N == 1 && "Invalid number of operands!");
   1257     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1258   }
   1259 
   1260   void addImmOperands(MCInst &Inst, unsigned N) const {
   1261     assert(N == 1 && "Invalid number of operands!");
   1262     // If this is a pageoff symrefexpr with an addend, adjust the addend
   1263     // to be only the page-offset portion. Otherwise, just add the expr
   1264     // as-is.
   1265     addExpr(Inst, getImm());
   1266   }
   1267 
   1268   void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
   1269     assert(N == 2 && "Invalid number of operands!");
   1270     if (isShiftedImm()) {
   1271       addExpr(Inst, getShiftedImmVal());
   1272       Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
   1273     } else {
   1274       addExpr(Inst, getImm());
   1275       Inst.addOperand(MCOperand::createImm(0));
   1276     }
   1277   }
   1278 
   1279   void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
   1280     assert(N == 2 && "Invalid number of operands!");
   1281 
   1282     const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
   1283     const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
   1284     int64_t Val = -CE->getValue();
   1285     unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
   1286 
   1287     Inst.addOperand(MCOperand::createImm(Val));
   1288     Inst.addOperand(MCOperand::createImm(ShiftAmt));
   1289   }
   1290 
   1291   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
   1292     assert(N == 1 && "Invalid number of operands!");
   1293     Inst.addOperand(MCOperand::createImm(getCondCode()));
   1294   }
   1295 
   1296   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
   1297     assert(N == 1 && "Invalid number of operands!");
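             // ADRP targets are page-granular: a constant offset is emitted as a page
             // delta, hence the shift right by 12 (4 KiB pages) below.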
   1298     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1299     if (!MCE)
   1300       addExpr(Inst, getImm());
   1301     else
   1302       Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
   1303   }
   1304 
   1305   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
   1306     addImmOperands(Inst, N);
   1307   }
   1308 
   1309   template<int Scale>
   1310   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
   1311     assert(N == 1 && "Invalid number of operands!");
   1312     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1313 
   1314     if (!MCE) {
   1315       Inst.addOperand(MCOperand::createExpr(getImm()));
   1316       return;
   1317     }
   1318     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
   1319   }
   1320 
   1321   void addSImm9Operands(MCInst &Inst, unsigned N) const {
   1322     assert(N == 1 && "Invalid number of operands!");
   1323     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1324     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1325   }
   1326 
   1327   void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
   1328     assert(N == 1 && "Invalid number of operands!");
   1329     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1330     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
   1331   }
   1332 
   1333   void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
   1334     assert(N == 1 && "Invalid number of operands!");
   1335     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1336     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
   1337   }
   1338 
   1339   void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
   1340     assert(N == 1 && "Invalid number of operands!");
   1341     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1342     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
   1343   }
   1344 
   1345   void addImm0_1Operands(MCInst &Inst, unsigned N) const {
   1346     assert(N == 1 && "Invalid number of operands!");
   1347     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1348     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1349   }
   1350 
   1351   void addImm0_7Operands(MCInst &Inst, unsigned N) const {
   1352     assert(N == 1 && "Invalid number of operands!");
   1353     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1354     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1355   }
   1356 
   1357   void addImm1_8Operands(MCInst &Inst, unsigned N) const {
   1358     assert(N == 1 && "Invalid number of operands!");
   1359     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1360     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1361   }
   1362 
   1363   void addImm0_15Operands(MCInst &Inst, unsigned N) const {
   1364     assert(N == 1 && "Invalid number of operands!");
   1365     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1366     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1367   }
   1368 
   1369   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
   1370     assert(N == 1 && "Invalid number of operands!");
   1371     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1372     assert(MCE && "Invalid constant immediate operand!");
   1373     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1374   }
   1375 
   1376   void addImm0_31Operands(MCInst &Inst, unsigned N) const {
   1377     assert(N == 1 && "Invalid number of operands!");
   1378     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1379     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1380   }
   1381 
   1382   void addImm1_31Operands(MCInst &Inst, unsigned N) const {
   1383     assert(N == 1 && "Invalid number of operands!");
   1384     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1385     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1386   }
   1387 
   1388   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
   1389     assert(N == 1 && "Invalid number of operands!");
   1390     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1391     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1392   }
   1393 
   1394   void addImm0_63Operands(MCInst &Inst, unsigned N) const {
   1395     assert(N == 1 && "Invalid number of operands!");
   1396     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1397     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1398   }
   1399 
   1400   void addImm1_63Operands(MCInst &Inst, unsigned N) const {
   1401     assert(N == 1 && "Invalid number of operands!");
   1402     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1403     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1404   }
   1405 
   1406   void addImm1_64Operands(MCInst &Inst, unsigned N) const {
   1407     assert(N == 1 && "Invalid number of operands!");
   1408     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1409     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1410   }
   1411 
   1412   void addImm0_127Operands(MCInst &Inst, unsigned N) const {
   1413     assert(N == 1 && "Invalid number of operands!");
   1414     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1415     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1416   }
   1417 
   1418   void addImm0_255Operands(MCInst &Inst, unsigned N) const {
   1419     assert(N == 1 && "Invalid number of operands!");
   1420     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1421     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1422   }
   1423 
   1424   void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
   1425     assert(N == 1 && "Invalid number of operands!");
   1426     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1427     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1428   }
   1429 
   1430   void addImm32_63Operands(MCInst &Inst, unsigned N) const {
   1431     assert(N == 1 && "Invalid number of operands!");
   1432     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1433     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1434   }
   1435 
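             // Logical-immediate operands (AND/ORR/EOR/ANDS) are emitted in the hardware's
             // encoded N:immr:imms form rather than as the raw value; the "Not" variants
             // below encode the bitwise complement of the parsed value.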
   1436   void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
   1437     assert(N == 1 && "Invalid number of operands!");
   1438     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1439     uint64_t encoding =
   1440         AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
   1441     Inst.addOperand(MCOperand::createImm(encoding));
   1442   }
   1443 
   1444   void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
   1445     assert(N == 1 && "Invalid number of operands!");
   1446     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1447     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
   1448     Inst.addOperand(MCOperand::createImm(encoding));
   1449   }
   1450 
   1451   void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
   1452     assert(N == 1 && "Invalid number of operands!");
   1453     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1454     int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
   1455     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
   1456     Inst.addOperand(MCOperand::createImm(encoding));
   1457   }
   1458 
   1459   void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
   1460     assert(N == 1 && "Invalid number of operands!");
   1461     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1462     uint64_t encoding =
   1463         AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
   1464     Inst.addOperand(MCOperand::createImm(encoding));
   1465   }
   1466 
   1467   void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
   1468     assert(N == 1 && "Invalid number of operands!");
   1469     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1470     uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
   1471     Inst.addOperand(MCOperand::createImm(encoding));
   1472   }
   1473 
   1474   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
   1475     // Branch operands don't encode the low bits, so shift them off
   1476     // here. If it's a label, however, just put it on directly as there's
   1477     // not enough information now to do anything.
   1478     assert(N == 1 && "Invalid number of operands!");
   1479     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1480     if (!MCE) {
   1481       addExpr(Inst, getImm());
   1482       return;
   1483     }
   1484     assert(MCE && "Invalid constant immediate operand!");
   1485     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
   1486   }
   1487 
   1488   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
   1489     // Branch operands don't encode the low bits, so shift them off
   1490     // here. If it's a label, however, just put it on directly as there's
   1491     // not enough information now to do anything.
   1492     assert(N == 1 && "Invalid number of operands!");
   1493     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1494     if (!MCE) {
   1495       addExpr(Inst, getImm());
   1496       return;
   1497     }
   1498     assert(MCE && "Invalid constant immediate operand!");
   1499     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
   1500   }
   1501 
   1502   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
   1503     // Branch operands don't encode the low bits, so shift them off
   1504     // here. If it's a label, however, just put it on directly as there's
   1505     // not enough information now to do anything.
   1506     assert(N == 1 && "Invalid number of operands!");
   1507     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1508     if (!MCE) {
   1509       addExpr(Inst, getImm());
   1510       return;
   1511     }
   1512     assert(MCE && "Invalid constant immediate operand!");
   1513     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
   1514   }
   1515 
   1516   void addFPImmOperands(MCInst &Inst, unsigned N) const {
   1517     assert(N == 1 && "Invalid number of operands!");
   1518     Inst.addOperand(MCOperand::createImm(getFPImm()));
   1519   }
   1520 
   1521   void addBarrierOperands(MCInst &Inst, unsigned N) const {
   1522     assert(N == 1 && "Invalid number of operands!");
   1523     Inst.addOperand(MCOperand::createImm(getBarrier()));
   1524   }
   1525 
   1526   void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
   1527     assert(N == 1 && "Invalid number of operands!");
   1528 
   1529     Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
   1530   }
   1531 
   1532   void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
   1533     assert(N == 1 && "Invalid number of operands!");
   1534 
   1535     Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
   1536   }
   1537 
   1538   void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
   1539     assert(N == 1 && "Invalid number of operands!");
   1540 
   1541     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
   1542   }
   1543 
   1544   void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
   1545     assert(N == 1 && "Invalid number of operands!");
   1546 
   1547     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
   1548   }
   1549 
   1550   void addSysCROperands(MCInst &Inst, unsigned N) const {
   1551     assert(N == 1 && "Invalid number of operands!");
   1552     Inst.addOperand(MCOperand::createImm(getSysCR()));
   1553   }
   1554 
   1555   void addPrefetchOperands(MCInst &Inst, unsigned N) const {
   1556     assert(N == 1 && "Invalid number of operands!");
   1557     Inst.addOperand(MCOperand::createImm(getPrefetch()));
   1558   }
   1559 
   1560   void addPSBHintOperands(MCInst &Inst, unsigned N) const {
   1561     assert(N == 1 && "Invalid number of operands!");
   1562     Inst.addOperand(MCOperand::createImm(getPSBHint()));
   1563   }
   1564 
   1565   void addShifterOperands(MCInst &Inst, unsigned N) const {
   1566     assert(N == 1 && "Invalid number of operands!");
   1567     unsigned Imm =
   1568         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
   1569     Inst.addOperand(MCOperand::createImm(Imm));
   1570   }
   1571 
   1572   void addExtendOperands(MCInst &Inst, unsigned N) const {
   1573     assert(N == 1 && "Invalid number of operands!");
   1574     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1575     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
   1576     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
   1577     Inst.addOperand(MCOperand::createImm(Imm));
   1578   }
   1579 
   1580   void addExtend64Operands(MCInst &Inst, unsigned N) const {
   1581     assert(N == 1 && "Invalid number of operands!");
   1582     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1583     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
   1584     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
   1585     Inst.addOperand(MCOperand::createImm(Imm));
   1586   }
   1587 
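             // Memory extend operands are emitted as two immediates: whether the extend is
             // a signed one (SXTW/SXTX) and whether a shift should be applied.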
   1588   void addMemExtendOperands(MCInst &Inst, unsigned N) const {
   1589     assert(N == 2 && "Invalid number of operands!");
   1590     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1591     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
   1592     Inst.addOperand(MCOperand::createImm(IsSigned));
   1593     Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
   1594   }
   1595 
   1596   // For 8-bit load/store instructions with a register offset, both the
   1597   // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
   1598   // they're disambiguated by whether the shift was explicit or implicit rather
   1599   // than its size.
   1600   void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
   1601     assert(N == 2 && "Invalid number of operands!");
   1602     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1603     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
   1604     Inst.addOperand(MCOperand::createImm(IsSigned));
   1605     Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
   1606   }
   1607 
   1608   template<int Shift>
   1609   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
   1610     assert(N == 1 && "Invalid number of operands!");
   1611 
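             // Extract the 16-bit chunk selected by Shift, e.g. with Shift == 16 a MOV
             // alias of #0x12340000 contributes 0x1234 as the MOVZ immediate.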
   1612     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
   1613     uint64_t Value = CE->getValue();
   1614     Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
   1615   }
   1616 
   1617   template<int Shift>
   1618   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
   1619     assert(N == 1 && "Invalid number of operands!");
   1620 
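             // MOVN materializes the bitwise NOT of its operand, so the chunk is taken
             // from ~Value rather than Value.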
   1621     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
   1622     uint64_t Value = CE->getValue();
   1623     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
   1624   }
   1625 
   1626   void print(raw_ostream &OS) const override;
   1627 
   1628   static std::unique_ptr<AArch64Operand>
   1629   CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
   1630     auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
   1631     Op->Tok.Data = Str.data();
   1632     Op->Tok.Length = Str.size();
   1633     Op->Tok.IsSuffix = IsSuffix;
   1634     Op->StartLoc = S;
   1635     Op->EndLoc = S;
   1636     return Op;
   1637   }
   1638 
   1639   static std::unique_ptr<AArch64Operand>
   1640   CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
   1641     auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
   1642     Op->Reg.RegNum = RegNum;
   1643     Op->Reg.isVector = isVector;
   1644     Op->StartLoc = S;
   1645     Op->EndLoc = E;
   1646     return Op;
   1647   }
   1648 
   1649   static std::unique_ptr<AArch64Operand>
   1650   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
   1651                    char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
   1652     auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
   1653     Op->VectorList.RegNum = RegNum;
   1654     Op->VectorList.Count = Count;
   1655     Op->VectorList.NumElements = NumElements;
   1656     Op->VectorList.ElementKind = ElementKind;
   1657     Op->StartLoc = S;
   1658     Op->EndLoc = E;
   1659     return Op;
   1660   }
   1661 
   1662   static std::unique_ptr<AArch64Operand>
   1663   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
   1664     auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
   1665     Op->VectorIndex.Val = Idx;
   1666     Op->StartLoc = S;
   1667     Op->EndLoc = E;
   1668     return Op;
   1669   }
   1670 
   1671   static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
   1672                                                    SMLoc E, MCContext &Ctx) {
   1673     auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
   1674     Op->Imm.Val = Val;
   1675     Op->StartLoc = S;
   1676     Op->EndLoc = E;
   1677     return Op;
   1678   }
   1679 
   1680   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
   1681                                                           unsigned ShiftAmount,
   1682                                                           SMLoc S, SMLoc E,
   1683                                                           MCContext &Ctx) {
   1684     auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
    1685     Op->ShiftedImm.Val = Val;
   1686     Op->ShiftedImm.ShiftAmount = ShiftAmount;
   1687     Op->StartLoc = S;
   1688     Op->EndLoc = E;
   1689     return Op;
   1690   }
   1691 
   1692   static std::unique_ptr<AArch64Operand>
   1693   CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
   1694     auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
   1695     Op->CondCode.Code = Code;
   1696     Op->StartLoc = S;
   1697     Op->EndLoc = E;
   1698     return Op;
   1699   }
   1700 
   1701   static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
   1702                                                      MCContext &Ctx) {
   1703     auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
   1704     Op->FPImm.Val = Val;
   1705     Op->StartLoc = S;
   1706     Op->EndLoc = S;
   1707     return Op;
   1708   }
   1709 
   1710   static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
   1711                                                        StringRef Str,
   1712                                                        SMLoc S,
   1713                                                        MCContext &Ctx) {
   1714     auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
   1715     Op->Barrier.Val = Val;
   1716     Op->Barrier.Data = Str.data();
   1717     Op->Barrier.Length = Str.size();
   1718     Op->StartLoc = S;
   1719     Op->EndLoc = S;
   1720     return Op;
   1721   }
   1722 
   1723   static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
   1724                                                       uint32_t MRSReg,
   1725                                                       uint32_t MSRReg,
   1726                                                       uint32_t PStateField,
   1727                                                       MCContext &Ctx) {
   1728     auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
   1729     Op->SysReg.Data = Str.data();
   1730     Op->SysReg.Length = Str.size();
   1731     Op->SysReg.MRSReg = MRSReg;
   1732     Op->SysReg.MSRReg = MSRReg;
   1733     Op->SysReg.PStateField = PStateField;
   1734     Op->StartLoc = S;
   1735     Op->EndLoc = S;
   1736     return Op;
   1737   }
   1738 
   1739   static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
   1740                                                      SMLoc E, MCContext &Ctx) {
   1741     auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
   1742     Op->SysCRImm.Val = Val;
   1743     Op->StartLoc = S;
   1744     Op->EndLoc = E;
   1745     return Op;
   1746   }
   1747 
   1748   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
   1749                                                         StringRef Str,
   1750                                                         SMLoc S,
   1751                                                         MCContext &Ctx) {
   1752     auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
   1753     Op->Prefetch.Val = Val;
   1754     Op->Barrier.Data = Str.data();
   1755     Op->Barrier.Length = Str.size();
   1756     Op->StartLoc = S;
   1757     Op->EndLoc = S;
   1758     return Op;
   1759   }
   1760 
   1761   static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
   1762                                                        StringRef Str,
   1763                                                        SMLoc S,
   1764                                                        MCContext &Ctx) {
   1765     auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
   1766     Op->PSBHint.Val = Val;
   1767     Op->PSBHint.Data = Str.data();
   1768     Op->PSBHint.Length = Str.size();
   1769     Op->StartLoc = S;
   1770     Op->EndLoc = S;
   1771     return Op;
   1772   }
   1773 
   1774   static std::unique_ptr<AArch64Operand>
   1775   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
   1776                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
   1777     auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
   1778     Op->ShiftExtend.Type = ShOp;
   1779     Op->ShiftExtend.Amount = Val;
   1780     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
   1781     Op->StartLoc = S;
   1782     Op->EndLoc = E;
   1783     return Op;
   1784   }
   1785 };
   1786 
   1787 } // end anonymous namespace.
   1788 
   1789 void AArch64Operand::print(raw_ostream &OS) const {
   1790   switch (Kind) {
   1791   case k_FPImm:
   1792     OS << "<fpimm " << getFPImm() << "("
   1793        << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
   1794     break;
   1795   case k_Barrier: {
   1796     StringRef Name = getBarrierName();
   1797     if (!Name.empty())
   1798       OS << "<barrier " << Name << ">";
   1799     else
   1800       OS << "<barrier invalid #" << getBarrier() << ">";
   1801     break;
   1802   }
   1803   case k_Immediate:
   1804     OS << *getImm();
   1805     break;
   1806   case k_ShiftedImm: {
   1807     unsigned Shift = getShiftedImmShift();
   1808     OS << "<shiftedimm ";
   1809     OS << *getShiftedImmVal();
   1810     OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
   1811     break;
   1812   }
   1813   case k_CondCode:
   1814     OS << "<condcode " << getCondCode() << ">";
   1815     break;
   1816   case k_Register:
   1817     OS << "<register " << getReg() << ">";
   1818     break;
   1819   case k_VectorList: {
   1820     OS << "<vectorlist ";
   1821     unsigned Reg = getVectorListStart();
   1822     for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
   1823       OS << Reg + i << " ";
   1824     OS << ">";
   1825     break;
   1826   }
   1827   case k_VectorIndex:
   1828     OS << "<vectorindex " << getVectorIndex() << ">";
   1829     break;
   1830   case k_SysReg:
   1831     OS << "<sysreg: " << getSysReg() << '>';
   1832     break;
   1833   case k_Token:
   1834     OS << "'" << getToken() << "'";
   1835     break;
   1836   case k_SysCR:
   1837     OS << "c" << getSysCR();
   1838     break;
   1839   case k_Prefetch: {
   1840     StringRef Name = getPrefetchName();
   1841     if (!Name.empty())
   1842       OS << "<prfop " << Name << ">";
   1843     else
   1844       OS << "<prfop invalid #" << getPrefetch() << ">";
   1845     break;
   1846   }
   1847   case k_PSBHint: {
   1848     OS << getPSBHintName();
   1849     break;
   1850   }
   1851   case k_ShiftExtend: {
   1852     OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
   1853        << getShiftExtendAmount();
   1854     if (!hasShiftExtendAmount())
   1855       OS << "<imp>";
   1856     OS << '>';
   1857     break;
   1858   }
   1859   }
   1860 }
   1861 
   1862 /// @name Auto-generated Match Functions
   1863 /// {
   1864 
   1865 static unsigned MatchRegisterName(StringRef Name);
   1866 
   1867 /// }
   1868 
   1869 static unsigned matchVectorRegName(StringRef Name) {
   1870   return StringSwitch<unsigned>(Name.lower())
   1871       .Case("v0", AArch64::Q0)
   1872       .Case("v1", AArch64::Q1)
   1873       .Case("v2", AArch64::Q2)
   1874       .Case("v3", AArch64::Q3)
   1875       .Case("v4", AArch64::Q4)
   1876       .Case("v5", AArch64::Q5)
   1877       .Case("v6", AArch64::Q6)
   1878       .Case("v7", AArch64::Q7)
   1879       .Case("v8", AArch64::Q8)
   1880       .Case("v9", AArch64::Q9)
   1881       .Case("v10", AArch64::Q10)
   1882       .Case("v11", AArch64::Q11)
   1883       .Case("v12", AArch64::Q12)
   1884       .Case("v13", AArch64::Q13)
   1885       .Case("v14", AArch64::Q14)
   1886       .Case("v15", AArch64::Q15)
   1887       .Case("v16", AArch64::Q16)
   1888       .Case("v17", AArch64::Q17)
   1889       .Case("v18", AArch64::Q18)
   1890       .Case("v19", AArch64::Q19)
   1891       .Case("v20", AArch64::Q20)
   1892       .Case("v21", AArch64::Q21)
   1893       .Case("v22", AArch64::Q22)
   1894       .Case("v23", AArch64::Q23)
   1895       .Case("v24", AArch64::Q24)
   1896       .Case("v25", AArch64::Q25)
   1897       .Case("v26", AArch64::Q26)
   1898       .Case("v27", AArch64::Q27)
   1899       .Case("v28", AArch64::Q28)
   1900       .Case("v29", AArch64::Q29)
   1901       .Case("v30", AArch64::Q30)
   1902       .Case("v31", AArch64::Q31)
   1903       .Default(0);
   1904 }
   1905 
   1906 static bool isValidVectorKind(StringRef Name) {
   1907   return StringSwitch<bool>(Name.lower())
   1908       .Case(".8b", true)
   1909       .Case(".16b", true)
   1910       .Case(".4h", true)
   1911       .Case(".8h", true)
   1912       .Case(".2s", true)
   1913       .Case(".4s", true)
   1914       .Case(".1d", true)
   1915       .Case(".2d", true)
   1916       .Case(".1q", true)
   1917       // Accept the width neutral ones, too, for verbose syntax. If those
   1918       // aren't used in the right places, the token operand won't match so
   1919       // all will work out.
   1920       .Case(".b", true)
   1921       .Case(".h", true)
   1922       .Case(".s", true)
   1923       .Case(".d", true)
   1924       // Needed for fp16 scalar pairwise reductions
   1925       .Case(".2h", true)
   1926       .Default(false);
   1927 }
   1928 
   1929 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
   1930                                  char &ElementKind) {
   1931   assert(isValidVectorKind(Name));
   1932 
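           // E.g. ".4s" gives NumElements = 4 and ElementKind = 's'; width-neutral kinds
           // such as ".s" leave NumElements as 0.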
   1933   ElementKind = Name.lower()[Name.size() - 1];
   1934   NumElements = 0;
   1935 
   1936   if (Name.size() == 2)
   1937     return;
   1938 
   1939   // Parse the lane count
   1940   Name = Name.drop_front();
   1941   while (isdigit(Name.front())) {
   1942     NumElements = 10 * NumElements + (Name.front() - '0');
   1943     Name = Name.drop_front();
   1944   }
   1945 }
   1946 
   1947 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
   1948                                      SMLoc &EndLoc) {
   1949   StartLoc = getLoc();
   1950   RegNo = tryParseRegister();
   1951   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   1952   return (RegNo == (unsigned)-1);
   1953 }
   1954 
   1955 // Matches a register name or register alias previously defined by '.req'
   1956 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
   1957                                                   bool isVector) {
   1958   unsigned RegNum = isVector ? matchVectorRegName(Name)
   1959                              : MatchRegisterName(Name);
   1960 
   1961   if (RegNum == 0) {
   1962     // Check for aliases registered via .req. Canonicalize to lower case.
   1963     // That's more consistent since register names are case insensitive, and
   1964     // it's how the original entry was passed in from MC/MCParser/AsmParser.
   1965     auto Entry = RegisterReqs.find(Name.lower());
   1966     if (Entry == RegisterReqs.end())
   1967       return 0;
    1968     // Set RegNum if the match is the right kind of register.
   1969     if (isVector == Entry->getValue().first)
   1970       RegNum = Entry->getValue().second;
   1971   }
   1972   return RegNum;
   1973 }
   1974 
    1975 /// tryParseRegister - Try to parse a register name. The token must be an
    1976 /// Identifier when called, and if it is a register name the token is eaten
    1977 /// and the register number is returned.
   1978 int AArch64AsmParser::tryParseRegister() {
   1979   MCAsmParser &Parser = getParser();
   1980   const AsmToken &Tok = Parser.getTok();
   1981   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
   1982 
   1983   std::string lowerCase = Tok.getString().lower();
   1984   unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
   1985   // Also handle a few aliases of registers.
   1986   if (RegNum == 0)
   1987     RegNum = StringSwitch<unsigned>(lowerCase)
   1988                  .Case("fp",  AArch64::FP)
   1989                  .Case("lr",  AArch64::LR)
   1990                  .Case("x31", AArch64::XZR)
   1991                  .Case("w31", AArch64::WZR)
   1992                  .Default(0);
   1993 
   1994   if (RegNum == 0)
   1995     return -1;
   1996 
   1997   Parser.Lex(); // Eat identifier token.
   1998   return RegNum;
   1999 }
   2000 
   2001 /// tryMatchVectorRegister - Try to parse a vector register name with optional
    2002 /// kind specifier. If it is a vector register, eat the token and return its number.
   2003 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
   2004   MCAsmParser &Parser = getParser();
   2005   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   2006     TokError("vector register expected");
   2007     return -1;
   2008   }
   2009 
   2010   StringRef Name = Parser.getTok().getString();
   2011   // If there is a kind specifier, it's separated from the register name by
   2012   // a '.'.
   2013   size_t Start = 0, Next = Name.find('.');
   2014   StringRef Head = Name.slice(Start, Next);
   2015   unsigned RegNum = matchRegisterNameAlias(Head, true);
   2016 
   2017   if (RegNum) {
   2018     if (Next != StringRef::npos) {
   2019       Kind = Name.slice(Next, StringRef::npos);
   2020       if (!isValidVectorKind(Kind)) {
   2021         TokError("invalid vector kind qualifier");
   2022         return -1;
   2023       }
   2024     }
   2025     Parser.Lex(); // Eat the register token.
   2026     return RegNum;
   2027   }
   2028 
   2029   if (expected)
   2030     TokError("vector register expected");
   2031   return -1;
   2032 }
   2033 
   2034 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
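         /// e.g. the 'c7' and 'c5' operands of 'sys #0, c7, c5, #0'.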
   2035 AArch64AsmParser::OperandMatchResultTy
   2036 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
   2037   MCAsmParser &Parser = getParser();
   2038   SMLoc S = getLoc();
   2039 
   2040   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   2041     Error(S, "Expected cN operand where 0 <= N <= 15");
   2042     return MatchOperand_ParseFail;
   2043   }
   2044 
   2045   StringRef Tok = Parser.getTok().getIdentifier();
   2046   if (Tok[0] != 'c' && Tok[0] != 'C') {
   2047     Error(S, "Expected cN operand where 0 <= N <= 15");
   2048     return MatchOperand_ParseFail;
   2049   }
   2050 
   2051   uint32_t CRNum;
   2052   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
   2053   if (BadNum || CRNum > 15) {
   2054     Error(S, "Expected cN operand where 0 <= N <= 15");
   2055     return MatchOperand_ParseFail;
   2056   }
   2057 
   2058   Parser.Lex(); // Eat identifier token.
   2059   Operands.push_back(
   2060       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
   2061   return MatchOperand_Success;
   2062 }
   2063 
   2064 /// tryParsePrefetch - Try to parse a prefetch operand.
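         /// e.g. the 'pldl1keep' in 'prfm pldl1keep, [x0]', or a bare '#imm' in [0, 31].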
   2065 AArch64AsmParser::OperandMatchResultTy
   2066 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
   2067   MCAsmParser &Parser = getParser();
   2068   SMLoc S = getLoc();
   2069   const AsmToken &Tok = Parser.getTok();
   2070   // Either an identifier for named values or a 5-bit immediate.
   2071   bool Hash = Tok.is(AsmToken::Hash);
   2072   if (Hash || Tok.is(AsmToken::Integer)) {
   2073     if (Hash)
   2074       Parser.Lex(); // Eat hash token.
   2075     const MCExpr *ImmVal;
   2076     if (getParser().parseExpression(ImmVal))
   2077       return MatchOperand_ParseFail;
   2078 
   2079     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2080     if (!MCE) {
   2081       TokError("immediate value expected for prefetch operand");
   2082       return MatchOperand_ParseFail;
   2083     }
   2084     unsigned prfop = MCE->getValue();
   2085     if (prfop > 31) {
   2086       TokError("prefetch operand out of range, [0,31] expected");
   2087       return MatchOperand_ParseFail;
   2088     }
   2089 
   2090     bool Valid;
   2091     auto Mapper = AArch64PRFM::PRFMMapper();
   2092     StringRef Name =
   2093         Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
   2094     Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
   2095                                                       S, getContext()));
   2096     return MatchOperand_Success;
   2097   }
   2098 
   2099   if (Tok.isNot(AsmToken::Identifier)) {
   2100     TokError("pre-fetch hint expected");
   2101     return MatchOperand_ParseFail;
   2102   }
   2103 
   2104   bool Valid;
   2105   auto Mapper = AArch64PRFM::PRFMMapper();
   2106   unsigned prfop =
   2107       Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
   2108   if (!Valid) {
   2109     TokError("pre-fetch hint expected");
   2110     return MatchOperand_ParseFail;
   2111   }
   2112 
   2113   Parser.Lex(); // Eat identifier token.
   2114   Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
   2115                                                     S, getContext()));
   2116   return MatchOperand_Success;
   2117 }
   2118 
    2119 /// tryParsePSBHint - Try to parse a PSB hint operand, mapped onto the HINT instruction.
   2120 AArch64AsmParser::OperandMatchResultTy
   2121 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
   2122   MCAsmParser &Parser = getParser();
   2123   SMLoc S = getLoc();
   2124   const AsmToken &Tok = Parser.getTok();
   2125   if (Tok.isNot(AsmToken::Identifier)) {
   2126     TokError("invalid operand for instruction");
   2127     return MatchOperand_ParseFail;
   2128   }
   2129 
   2130   bool Valid;
   2131   auto Mapper = AArch64PSBHint::PSBHintMapper();
   2132   unsigned psbhint =
   2133       Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
   2134   if (!Valid) {
   2135     TokError("invalid operand for instruction");
   2136     return MatchOperand_ParseFail;
   2137   }
   2138 
   2139   Parser.Lex(); // Eat identifier token.
   2140   Operands.push_back(AArch64Operand::CreatePSBHint(psbhint, Tok.getString(),
   2141                                                    S, getContext()));
   2142   return MatchOperand_Success;
   2143 }
   2144 
   2145 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
   2146 /// instruction.
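         /// e.g. the ':got:sym' or bare 'sym' operand of 'adrp x0, :got:sym'.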
   2147 AArch64AsmParser::OperandMatchResultTy
   2148 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
   2149   MCAsmParser &Parser = getParser();
   2150   SMLoc S = getLoc();
   2151   const MCExpr *Expr;
   2152 
   2153   if (Parser.getTok().is(AsmToken::Hash)) {
   2154     Parser.Lex(); // Eat hash token.
   2155   }
   2156 
   2157   if (parseSymbolicImmVal(Expr))
   2158     return MatchOperand_ParseFail;
   2159 
   2160   AArch64MCExpr::VariantKind ELFRefKind;
   2161   MCSymbolRefExpr::VariantKind DarwinRefKind;
   2162   int64_t Addend;
   2163   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
   2164     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
   2165         ELFRefKind == AArch64MCExpr::VK_INVALID) {
   2166       // No modifier was specified at all; this is the syntax for an ELF basic
   2167       // ADRP relocation (unfortunately).
   2168       Expr =
   2169           AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
   2170     } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
   2171                 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
   2172                Addend != 0) {
   2173       Error(S, "gotpage label reference not allowed an addend");
   2174       return MatchOperand_ParseFail;
   2175     } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
   2176                DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
   2177                DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
   2178                ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
   2179                ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
   2180                ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
   2181       // The operand must be an @page or @gotpage qualified symbolref.
   2182       Error(S, "page or gotpage label reference expected");
   2183       return MatchOperand_ParseFail;
   2184     }
   2185   }
   2186 
    2187   // We have either a label reference (possibly with an addend) or an immediate.
    2188   // The addend is a raw value here; the linker will adjust it to reference only
    2189   // the page.
   2190   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2191   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
   2192 
   2193   return MatchOperand_Success;
   2194 }
   2195 
   2196 /// tryParseAdrLabel - Parse and validate a source label for the ADR
   2197 /// instruction.
   2198 AArch64AsmParser::OperandMatchResultTy
   2199 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
   2200   MCAsmParser &Parser = getParser();
   2201   SMLoc S = getLoc();
   2202   const MCExpr *Expr;
   2203 
   2204   if (Parser.getTok().is(AsmToken::Hash)) {
   2205     Parser.Lex(); // Eat hash token.
   2206   }
   2207 
   2208   if (getParser().parseExpression(Expr))
   2209     return MatchOperand_ParseFail;
   2210 
   2211   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2212   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
   2213 
   2214   return MatchOperand_Success;
   2215 }
   2216 
    2217 /// tryParseFPImm - Try to parse a floating-point immediate operand.
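         /// e.g. the '#0.5' in 'fmov d0, #0.5', which is encoded as an 8-bit immediate.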
   2218 AArch64AsmParser::OperandMatchResultTy
   2219 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
   2220   MCAsmParser &Parser = getParser();
   2221   SMLoc S = getLoc();
   2222 
   2223   bool Hash = false;
   2224   if (Parser.getTok().is(AsmToken::Hash)) {
   2225     Parser.Lex(); // Eat '#'
   2226     Hash = true;
   2227   }
   2228 
   2229   // Handle negation, as that still comes through as a separate token.
   2230   bool isNegative = false;
   2231   if (Parser.getTok().is(AsmToken::Minus)) {
   2232     isNegative = true;
   2233     Parser.Lex();
   2234   }
   2235   const AsmToken &Tok = Parser.getTok();
   2236   if (Tok.is(AsmToken::Real)) {
   2237     APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
   2238     if (isNegative)
   2239       RealVal.changeSign();
   2240 
   2241     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   2242     int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
   2243     Parser.Lex(); // Eat the token.
   2244     // Check for out of range values. As an exception, we let Zero through,
   2245     // as we handle that special case in post-processing before matching in
   2246     // order to use the zero register for it.
   2247     if (Val == -1 && !RealVal.isPosZero()) {
   2248       TokError("expected compatible register or floating-point constant");
   2249       return MatchOperand_ParseFail;
   2250     }
   2251     Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
   2252     return MatchOperand_Success;
   2253   }
   2254   if (Tok.is(AsmToken::Integer)) {
   2255     int64_t Val;
   2256     if (!isNegative && Tok.getString().startswith("0x")) {
   2257       Val = Tok.getIntVal();
   2258       if (Val > 255 || Val < 0) {
   2259         TokError("encoded floating point value out of range");
   2260         return MatchOperand_ParseFail;
   2261       }
   2262     } else {
   2263       APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
   2264       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   2265       // If we had a '-' in front, toggle the sign bit.
   2266       IntVal ^= (uint64_t)isNegative << 63;
   2267       Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
   2268     }
   2269     Parser.Lex(); // Eat the token.
   2270     Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
   2271     return MatchOperand_Success;
   2272   }
   2273 
   2274   if (!Hash)
   2275     return MatchOperand_NoMatch;
   2276 
   2277   TokError("invalid floating point immediate");
   2278   return MatchOperand_ParseFail;
   2279 }
   2280 
   2281 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
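         /// e.g. a plain '#4096' is canonicalized here to the equivalent '#1, lsl #12' form.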
   2282 AArch64AsmParser::OperandMatchResultTy
   2283 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
   2284   MCAsmParser &Parser = getParser();
   2285   SMLoc S = getLoc();
   2286 
   2287   if (Parser.getTok().is(AsmToken::Hash))
   2288     Parser.Lex(); // Eat '#'
   2289   else if (Parser.getTok().isNot(AsmToken::Integer))
    2290     // The operand must start with '#' or be a bare integer; otherwise there is no match.
   2291     return MatchOperand_NoMatch;
   2292 
   2293   const MCExpr *Imm;
   2294   if (parseSymbolicImmVal(Imm))
   2295     return MatchOperand_ParseFail;
   2296   else if (Parser.getTok().isNot(AsmToken::Comma)) {
   2297     uint64_t ShiftAmount = 0;
   2298     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
   2299     if (MCE) {
   2300       int64_t Val = MCE->getValue();
   2301       if (Val > 0xfff && (Val & 0xfff) == 0) {
   2302         Imm = MCConstantExpr::create(Val >> 12, getContext());
   2303         ShiftAmount = 12;
   2304       }
   2305     }
   2306     SMLoc E = Parser.getTok().getLoc();
   2307     Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
   2308                                                         getContext()));
   2309     return MatchOperand_Success;
   2310   }
   2311 
   2312   // Eat ','
   2313   Parser.Lex();
   2314 
   2315   // The optional operand must be "lsl #N" where N is non-negative.
   2316   if (!Parser.getTok().is(AsmToken::Identifier) ||
   2317       !Parser.getTok().getIdentifier().equals_lower("lsl")) {
   2318     Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
   2319     return MatchOperand_ParseFail;
   2320   }
   2321 
   2322   // Eat 'lsl'
   2323   Parser.Lex();
   2324 
   2325   if (Parser.getTok().is(AsmToken::Hash)) {
   2326     Parser.Lex();
   2327   }
   2328 
   2329   if (Parser.getTok().isNot(AsmToken::Integer)) {
   2330     Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
   2331     return MatchOperand_ParseFail;
   2332   }
   2333 
   2334   int64_t ShiftAmount = Parser.getTok().getIntVal();
   2335 
   2336   if (ShiftAmount < 0) {
   2337     Error(Parser.getTok().getLoc(), "positive shift amount required");
   2338     return MatchOperand_ParseFail;
   2339   }
   2340   Parser.Lex(); // Eat the number
   2341 
   2342   SMLoc E = Parser.getTok().getLoc();
   2343   Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
   2344                                                       S, E, getContext()));
   2345   return MatchOperand_Success;
   2346 }
   2347 
   2348 /// parseCondCodeString - Parse a Condition Code string.
   2349 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
   2350   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
   2351                     .Case("eq", AArch64CC::EQ)
   2352                     .Case("ne", AArch64CC::NE)
   2353                     .Case("cs", AArch64CC::HS)
   2354                     .Case("hs", AArch64CC::HS)
   2355                     .Case("cc", AArch64CC::LO)
   2356                     .Case("lo", AArch64CC::LO)
   2357                     .Case("mi", AArch64CC::MI)
   2358                     .Case("pl", AArch64CC::PL)
   2359                     .Case("vs", AArch64CC::VS)
   2360                     .Case("vc", AArch64CC::VC)
   2361                     .Case("hi", AArch64CC::HI)
   2362                     .Case("ls", AArch64CC::LS)
   2363                     .Case("ge", AArch64CC::GE)
   2364                     .Case("lt", AArch64CC::LT)
   2365                     .Case("gt", AArch64CC::GT)
   2366                     .Case("le", AArch64CC::LE)
   2367                     .Case("al", AArch64CC::AL)
   2368                     .Case("nv", AArch64CC::NV)
   2369                     .Default(AArch64CC::Invalid);
   2370   return CC;
   2371 }
   2372 
   2373 /// parseCondCode - Parse a Condition Code operand.
   2374 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
   2375                                      bool invertCondCode) {
   2376   MCAsmParser &Parser = getParser();
   2377   SMLoc S = getLoc();
   2378   const AsmToken &Tok = Parser.getTok();
   2379   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
   2380 
   2381   StringRef Cond = Tok.getString();
   2382   AArch64CC::CondCode CC = parseCondCodeString(Cond);
   2383   if (CC == AArch64CC::Invalid)
   2384     return TokError("invalid condition code");
   2385   Parser.Lex(); // Eat identifier token.
   2386 
   2387   if (invertCondCode) {
   2388     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
   2389       return TokError("condition codes AL and NV are invalid for this instruction");
   2390     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
   2391   }
   2392 
   2393   Operands.push_back(
   2394       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
   2395   return false;
   2396 }
   2397 
    2398 /// tryParseOptionalShiftExtend - Some operands take an optional shift or extend
    2399 /// argument. Parse it if present.
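         /// e.g. the 'lsl #2' in 'add x0, x1, x2, lsl #2', or a bare 'uxtw' whose
         /// shift amount defaults to #0.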
   2400 AArch64AsmParser::OperandMatchResultTy
   2401 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
   2402   MCAsmParser &Parser = getParser();
   2403   const AsmToken &Tok = Parser.getTok();
   2404   std::string LowerID = Tok.getString().lower();
   2405   AArch64_AM::ShiftExtendType ShOp =
   2406       StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
   2407           .Case("lsl", AArch64_AM::LSL)
   2408           .Case("lsr", AArch64_AM::LSR)
   2409           .Case("asr", AArch64_AM::ASR)
   2410           .Case("ror", AArch64_AM::ROR)
   2411           .Case("msl", AArch64_AM::MSL)
   2412           .Case("uxtb", AArch64_AM::UXTB)
   2413           .Case("uxth", AArch64_AM::UXTH)
   2414           .Case("uxtw", AArch64_AM::UXTW)
   2415           .Case("uxtx", AArch64_AM::UXTX)
   2416           .Case("sxtb", AArch64_AM::SXTB)
   2417           .Case("sxth", AArch64_AM::SXTH)
   2418           .Case("sxtw", AArch64_AM::SXTW)
   2419           .Case("sxtx", AArch64_AM::SXTX)
   2420           .Default(AArch64_AM::InvalidShiftExtend);
   2421 
   2422   if (ShOp == AArch64_AM::InvalidShiftExtend)
   2423     return MatchOperand_NoMatch;
   2424 
   2425   SMLoc S = Tok.getLoc();
   2426   Parser.Lex();
   2427 
   2428   bool Hash = getLexer().is(AsmToken::Hash);
   2429   if (!Hash && getLexer().isNot(AsmToken::Integer)) {
   2430     if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
   2431         ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
   2432         ShOp == AArch64_AM::MSL) {
   2433       // We expect a number here.
   2434       TokError("expected #imm after shift specifier");
   2435       return MatchOperand_ParseFail;
   2436     }
   2437 
    2438     // "extend"-type operations don't need an immediate; #0 is implicit.
   2439     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2440     Operands.push_back(
   2441         AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
   2442     return MatchOperand_Success;
   2443   }
   2444 
   2445   if (Hash)
   2446     Parser.Lex(); // Eat the '#'.
   2447 
    2448   // Make sure we actually have a number or a parenthesized expression.
   2449   SMLoc E = Parser.getTok().getLoc();
   2450   if (!Parser.getTok().is(AsmToken::Integer) &&
   2451       !Parser.getTok().is(AsmToken::LParen)) {
   2452     Error(E, "expected integer shift amount");
   2453     return MatchOperand_ParseFail;
   2454   }
   2455 
   2456   const MCExpr *ImmVal;
   2457   if (getParser().parseExpression(ImmVal))
   2458     return MatchOperand_ParseFail;
   2459 
   2460   const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2461   if (!MCE) {
   2462     Error(E, "expected constant '#imm' after shift specifier");
   2463     return MatchOperand_ParseFail;
   2464   }
   2465 
   2466   E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2467   Operands.push_back(AArch64Operand::CreateShiftExtend(
   2468       ShOp, MCE->getValue(), true, S, E, getContext()));
   2469   return MatchOperand_Success;
   2470 }
   2471 
   2472 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
   2473 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
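         /// e.g. 'ic ialluis' becomes 'sys #0, c7, c1, #0' and 'tlbi vae1, x0'
         /// becomes 'sys #0, c8, c7, #1, x0'.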
   2474 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
   2475                                    OperandVector &Operands) {
   2476   if (Name.find('.') != StringRef::npos)
   2477     return TokError("invalid operand");
   2478 
   2479   Mnemonic = Name;
   2480   Operands.push_back(
   2481       AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
   2482 
   2483   MCAsmParser &Parser = getParser();
   2484   const AsmToken &Tok = Parser.getTok();
   2485   StringRef Op = Tok.getString();
   2486   SMLoc S = Tok.getLoc();
   2487 
   2488   const MCExpr *Expr = nullptr;
   2489 
   2490 #define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
   2491   do {                                                                         \
   2492     Expr = MCConstantExpr::create(op1, getContext());                          \
   2493     Operands.push_back(                                                        \
   2494         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
   2495     Operands.push_back(                                                        \
   2496         AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
   2497     Operands.push_back(                                                        \
   2498         AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
   2499     Expr = MCConstantExpr::create(op2, getContext());                          \
   2500     Operands.push_back(                                                        \
   2501         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
   2502   } while (0)
   2503 
   2504   if (Mnemonic == "ic") {
   2505     if (!Op.compare_lower("ialluis")) {
   2506       // SYS #0, C7, C1, #0
   2507       SYS_ALIAS(0, 7, 1, 0);
   2508     } else if (!Op.compare_lower("iallu")) {
   2509       // SYS #0, C7, C5, #0
   2510       SYS_ALIAS(0, 7, 5, 0);
   2511     } else if (!Op.compare_lower("ivau")) {
   2512       // SYS #3, C7, C5, #1
   2513       SYS_ALIAS(3, 7, 5, 1);
   2514     } else {
   2515       return TokError("invalid operand for IC instruction");
   2516     }
   2517   } else if (Mnemonic == "dc") {
   2518     if (!Op.compare_lower("zva")) {
   2519       // SYS #3, C7, C4, #1
   2520       SYS_ALIAS(3, 7, 4, 1);
   2521     } else if (!Op.compare_lower("ivac")) {
    2522       // SYS #0, C7, C6, #1
   2523       SYS_ALIAS(0, 7, 6, 1);
   2524     } else if (!Op.compare_lower("isw")) {
   2525       // SYS #0, C7, C6, #2
   2526       SYS_ALIAS(0, 7, 6, 2);
   2527     } else if (!Op.compare_lower("cvac")) {
   2528       // SYS #3, C7, C10, #1
   2529       SYS_ALIAS(3, 7, 10, 1);
   2530     } else if (!Op.compare_lower("csw")) {
   2531       // SYS #0, C7, C10, #2
   2532       SYS_ALIAS(0, 7, 10, 2);
   2533     } else if (!Op.compare_lower("cvau")) {
   2534       // SYS #3, C7, C11, #1
   2535       SYS_ALIAS(3, 7, 11, 1);
   2536     } else if (!Op.compare_lower("civac")) {
   2537       // SYS #3, C7, C14, #1
   2538       SYS_ALIAS(3, 7, 14, 1);
   2539     } else if (!Op.compare_lower("cisw")) {
   2540       // SYS #0, C7, C14, #2
   2541       SYS_ALIAS(0, 7, 14, 2);
   2542     } else if (!Op.compare_lower("cvap")) {
   2543       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
   2544         // SYS #3, C7, C12, #1
   2545         SYS_ALIAS(3, 7, 12, 1);
   2546       } else {
   2547         return TokError("DC CVAP requires ARMv8.2a");
   2548       }
   2549     } else {
   2550       return TokError("invalid operand for DC instruction");
   2551     }
   2552   } else if (Mnemonic == "at") {
   2553     if (!Op.compare_lower("s1e1r")) {
   2554       // SYS #0, C7, C8, #0
   2555       SYS_ALIAS(0, 7, 8, 0);
   2556     } else if (!Op.compare_lower("s1e2r")) {
   2557       // SYS #4, C7, C8, #0
   2558       SYS_ALIAS(4, 7, 8, 0);
   2559     } else if (!Op.compare_lower("s1e3r")) {
   2560       // SYS #6, C7, C8, #0
   2561       SYS_ALIAS(6, 7, 8, 0);
   2562     } else if (!Op.compare_lower("s1e1w")) {
   2563       // SYS #0, C7, C8, #1
   2564       SYS_ALIAS(0, 7, 8, 1);
   2565     } else if (!Op.compare_lower("s1e2w")) {
   2566       // SYS #4, C7, C8, #1
   2567       SYS_ALIAS(4, 7, 8, 1);
   2568     } else if (!Op.compare_lower("s1e3w")) {
   2569       // SYS #6, C7, C8, #1
   2570       SYS_ALIAS(6, 7, 8, 1);
   2571     } else if (!Op.compare_lower("s1e0r")) {
    2572       // SYS #0, C7, C8, #2
   2573       SYS_ALIAS(0, 7, 8, 2);
   2574     } else if (!Op.compare_lower("s1e0w")) {
   2575       // SYS #0, C7, C8, #3
   2576       SYS_ALIAS(0, 7, 8, 3);
   2577     } else if (!Op.compare_lower("s12e1r")) {
   2578       // SYS #4, C7, C8, #4
   2579       SYS_ALIAS(4, 7, 8, 4);
   2580     } else if (!Op.compare_lower("s12e1w")) {
   2581       // SYS #4, C7, C8, #5
   2582       SYS_ALIAS(4, 7, 8, 5);
   2583     } else if (!Op.compare_lower("s12e0r")) {
   2584       // SYS #4, C7, C8, #6
   2585       SYS_ALIAS(4, 7, 8, 6);
   2586     } else if (!Op.compare_lower("s12e0w")) {
   2587       // SYS #4, C7, C8, #7
   2588       SYS_ALIAS(4, 7, 8, 7);
   2589     } else if (!Op.compare_lower("s1e1rp")) {
   2590       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
   2591         // SYS #0, C7, C9, #0
   2592         SYS_ALIAS(0, 7, 9, 0);
   2593       } else {
   2594         return TokError("AT S1E1RP requires ARMv8.2a");
   2595       }
   2596     } else if (!Op.compare_lower("s1e1wp")) {
   2597       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
   2598         // SYS #0, C7, C9, #1
   2599         SYS_ALIAS(0, 7, 9, 1);
   2600       } else {
   2601         return TokError("AT S1E1WP requires ARMv8.2a");
   2602       }
   2603     } else {
   2604       return TokError("invalid operand for AT instruction");
   2605     }
   2606   } else if (Mnemonic == "tlbi") {
   2607     if (!Op.compare_lower("vmalle1is")) {
   2608       // SYS #0, C8, C3, #0
   2609       SYS_ALIAS(0, 8, 3, 0);
   2610     } else if (!Op.compare_lower("alle2is")) {
   2611       // SYS #4, C8, C3, #0
   2612       SYS_ALIAS(4, 8, 3, 0);
   2613     } else if (!Op.compare_lower("alle3is")) {
   2614       // SYS #6, C8, C3, #0
   2615       SYS_ALIAS(6, 8, 3, 0);
   2616     } else if (!Op.compare_lower("vae1is")) {
   2617       // SYS #0, C8, C3, #1
   2618       SYS_ALIAS(0, 8, 3, 1);
   2619     } else if (!Op.compare_lower("vae2is")) {
   2620       // SYS #4, C8, C3, #1
   2621       SYS_ALIAS(4, 8, 3, 1);
   2622     } else if (!Op.compare_lower("vae3is")) {
   2623       // SYS #6, C8, C3, #1
   2624       SYS_ALIAS(6, 8, 3, 1);
   2625     } else if (!Op.compare_lower("aside1is")) {
   2626       // SYS #0, C8, C3, #2
   2627       SYS_ALIAS(0, 8, 3, 2);
   2628     } else if (!Op.compare_lower("vaae1is")) {
   2629       // SYS #0, C8, C3, #3
   2630       SYS_ALIAS(0, 8, 3, 3);
   2631     } else if (!Op.compare_lower("alle1is")) {
   2632       // SYS #4, C8, C3, #4
   2633       SYS_ALIAS(4, 8, 3, 4);
   2634     } else if (!Op.compare_lower("vale1is")) {
   2635       // SYS #0, C8, C3, #5
   2636       SYS_ALIAS(0, 8, 3, 5);
   2637     } else if (!Op.compare_lower("vaale1is")) {
   2638       // SYS #0, C8, C3, #7
   2639       SYS_ALIAS(0, 8, 3, 7);
   2640     } else if (!Op.compare_lower("vmalle1")) {
   2641       // SYS #0, C8, C7, #0
   2642       SYS_ALIAS(0, 8, 7, 0);
   2643     } else if (!Op.compare_lower("alle2")) {
   2644       // SYS #4, C8, C7, #0
   2645       SYS_ALIAS(4, 8, 7, 0);
   2646     } else if (!Op.compare_lower("vale2is")) {
   2647       // SYS #4, C8, C3, #5
   2648       SYS_ALIAS(4, 8, 3, 5);
   2649     } else if (!Op.compare_lower("vale3is")) {
   2650       // SYS #6, C8, C3, #5
   2651       SYS_ALIAS(6, 8, 3, 5);
   2652     } else if (!Op.compare_lower("alle3")) {
   2653       // SYS #6, C8, C7, #0
   2654       SYS_ALIAS(6, 8, 7, 0);
   2655     } else if (!Op.compare_lower("vae1")) {
   2656       // SYS #0, C8, C7, #1
   2657       SYS_ALIAS(0, 8, 7, 1);
   2658     } else if (!Op.compare_lower("vae2")) {
   2659       // SYS #4, C8, C7, #1
   2660       SYS_ALIAS(4, 8, 7, 1);
   2661     } else if (!Op.compare_lower("vae3")) {
   2662       // SYS #6, C8, C7, #1
   2663       SYS_ALIAS(6, 8, 7, 1);
   2664     } else if (!Op.compare_lower("aside1")) {
   2665       // SYS #0, C8, C7, #2
   2666       SYS_ALIAS(0, 8, 7, 2);
   2667     } else if (!Op.compare_lower("vaae1")) {
   2668       // SYS #0, C8, C7, #3
   2669       SYS_ALIAS(0, 8, 7, 3);
   2670     } else if (!Op.compare_lower("alle1")) {
   2671       // SYS #4, C8, C7, #4
   2672       SYS_ALIAS(4, 8, 7, 4);
   2673     } else if (!Op.compare_lower("vale1")) {
   2674       // SYS #0, C8, C7, #5
   2675       SYS_ALIAS(0, 8, 7, 5);
   2676     } else if (!Op.compare_lower("vale2")) {
   2677       // SYS #4, C8, C7, #5
   2678       SYS_ALIAS(4, 8, 7, 5);
   2679     } else if (!Op.compare_lower("vale3")) {
   2680       // SYS #6, C8, C7, #5
   2681       SYS_ALIAS(6, 8, 7, 5);
   2682     } else if (!Op.compare_lower("vaale1")) {
   2683       // SYS #0, C8, C7, #7
   2684       SYS_ALIAS(0, 8, 7, 7);
   2685     } else if (!Op.compare_lower("ipas2e1")) {
   2686       // SYS #4, C8, C4, #1
   2687       SYS_ALIAS(4, 8, 4, 1);
   2688     } else if (!Op.compare_lower("ipas2le1")) {
   2689       // SYS #4, C8, C4, #5
   2690       SYS_ALIAS(4, 8, 4, 5);
    2691       // SYS #4, C8, C0, #1
   2692       // SYS #4, C8, C4, #1
   2693       SYS_ALIAS(4, 8, 0, 1);
    2694       // SYS #4, C8, C0, #5
   2695       // SYS #4, C8, C4, #5
   2696       SYS_ALIAS(4, 8, 0, 5);
   2697     } else if (!Op.compare_lower("vmalls12e1")) {
   2698       // SYS #4, C8, C7, #6
   2699       SYS_ALIAS(4, 8, 7, 6);
   2700     } else if (!Op.compare_lower("vmalls12e1is")) {
   2701       // SYS #4, C8, C3, #6
   2702       SYS_ALIAS(4, 8, 3, 6);
   2703     } else {
   2704       return TokError("invalid operand for TLBI instruction");
   2705     }
   2706   }
   2707 
   2708 #undef SYS_ALIAS
   2709 
   2710   Parser.Lex(); // Eat operand.
   2711 
   2712   bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
   2713   bool HasRegister = false;
   2714 
   2715   // Check for the optional register operand.
   2716   if (getLexer().is(AsmToken::Comma)) {
   2717     Parser.Lex(); // Eat comma.
   2718 
   2719     if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
   2720       return TokError("expected register operand");
   2721 
   2722     HasRegister = true;
   2723   }
   2724 
   2725   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   2726     Parser.eatToEndOfStatement();
   2727     return TokError("unexpected token in argument list");
   2728   }
   2729 
   2730   if (ExpectRegister && !HasRegister) {
   2731     return TokError("specified " + Mnemonic + " op requires a register");
   2732   }
   2733   else if (!ExpectRegister && HasRegister) {
   2734     return TokError("specified " + Mnemonic + " op does not use a register");
   2735   }
   2736 
   2737   Parser.Lex(); // Consume the EndOfStatement
   2738   return false;
   2739 }
   2740 
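         /// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand: either a named
         /// option (e.g. 'dsb ish') or a '#imm' in the range [0, 15].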
   2741 AArch64AsmParser::OperandMatchResultTy
   2742 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
   2743   MCAsmParser &Parser = getParser();
   2744   const AsmToken &Tok = Parser.getTok();
   2745 
   2746   // Can be either a #imm style literal or an option name
   2747   bool Hash = Tok.is(AsmToken::Hash);
   2748   if (Hash || Tok.is(AsmToken::Integer)) {
   2749     // Immediate operand.
   2750     if (Hash)
   2751       Parser.Lex(); // Eat the '#'
   2752     const MCExpr *ImmVal;
   2753     SMLoc ExprLoc = getLoc();
   2754     if (getParser().parseExpression(ImmVal))
   2755       return MatchOperand_ParseFail;
   2756     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2757     if (!MCE) {
   2758       Error(ExprLoc, "immediate value expected for barrier operand");
   2759       return MatchOperand_ParseFail;
   2760     }
   2761     if (MCE->getValue() < 0 || MCE->getValue() > 15) {
   2762       Error(ExprLoc, "barrier operand out of range");
   2763       return MatchOperand_ParseFail;
   2764     }
   2765     bool Valid;
   2766     auto Mapper = AArch64DB::DBarrierMapper();
   2767     StringRef Name =
   2768         Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
   2769     Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
   2770                                                       ExprLoc, getContext()));
   2771     return MatchOperand_Success;
   2772   }
   2773 
   2774   if (Tok.isNot(AsmToken::Identifier)) {
   2775     TokError("invalid operand for instruction");
   2776     return MatchOperand_ParseFail;
   2777   }
   2778 
   2779   bool Valid;
   2780   auto Mapper = AArch64DB::DBarrierMapper();
   2781   unsigned Opt =
   2782       Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
   2783   if (!Valid) {
   2784     TokError("invalid barrier option name");
   2785     return MatchOperand_ParseFail;
   2786   }
   2787 
   2788   // The only valid named option for ISB is 'sy'
   2789   if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
   2790     TokError("'sy' or #imm operand expected");
   2791     return MatchOperand_ParseFail;
   2792   }
   2793 
   2794   Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
   2795                                                     getLoc(), getContext()));
   2796   Parser.Lex(); // Consume the option
   2797 
   2798   return MatchOperand_Success;
   2799 }
   2800 
   2801 AArch64AsmParser::OperandMatchResultTy
   2802 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
   2803   MCAsmParser &Parser = getParser();
   2804   const AsmToken &Tok = Parser.getTok();
   2805 
   2806   if (Tok.isNot(AsmToken::Identifier))
   2807     return MatchOperand_NoMatch;
   2808 
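           // Look the identifier up in the MRS, MSR and PSTATE name spaces; the operand records
           // all three encodings so the matcher can select whichever one the instruction needs.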
   2809   bool IsKnown;
   2810   auto MRSMapper = AArch64SysReg::MRSMapper();
   2811   uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
   2812                                          getSTI().getFeatureBits(), IsKnown);
   2813   assert(IsKnown == (MRSReg != -1U) &&
   2814          "register should be -1 if and only if it's unknown");
   2815 
   2816   auto MSRMapper = AArch64SysReg::MSRMapper();
   2817   uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
   2818                                          getSTI().getFeatureBits(), IsKnown);
   2819   assert(IsKnown == (MSRReg != -1U) &&
   2820          "register should be -1 if and only if it's unknown");
   2821 
   2822   auto PStateMapper = AArch64PState::PStateMapper();
   2823   uint32_t PStateField =
   2824       PStateMapper.fromString(Tok.getString(),
   2825                               getSTI().getFeatureBits(), IsKnown);
   2826   assert(IsKnown == (PStateField != -1U) &&
   2827          "register should be -1 if and only if it's unknown");
   2828 
   2829   Operands.push_back(AArch64Operand::CreateSysReg(
   2830       Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
   2831   Parser.Lex(); // Eat identifier
   2832 
   2833   return MatchOperand_Success;
   2834 }
   2835 
   2836 /// tryParseVectorRegister - Parse a vector register operand.
   2837 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
   2838   MCAsmParser &Parser = getParser();
   2839   if (Parser.getTok().isNot(AsmToken::Identifier))
   2840     return true;
   2841 
   2842   SMLoc S = getLoc();
   2843   // Check for a vector register specifier first.
   2844   StringRef Kind;
   2845   int64_t Reg = tryMatchVectorRegister(Kind, false);
   2846   if (Reg == -1)
   2847     return true;
   2848   Operands.push_back(
   2849       AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
   2850   // If there was an explicit qualifier, that goes on as a literal text
   2851   // operand.
   2852   if (!Kind.empty())
   2853     Operands.push_back(
   2854         AArch64Operand::CreateToken(Kind, false, S, getContext()));
   2855 
   2856   // If there is an index specifier following the register, parse that too.
   2857   if (Parser.getTok().is(AsmToken::LBrac)) {
   2858     SMLoc SIdx = getLoc();
   2859     Parser.Lex(); // Eat left bracket token.
   2860 
   2861     const MCExpr *ImmVal;
   2862     if (getParser().parseExpression(ImmVal))
   2863       return false;
   2864     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2865     if (!MCE) {
   2866       TokError("immediate value expected for vector index");
   2867       return false;
   2868     }
   2869 
   2870     SMLoc E = getLoc();
   2871     if (Parser.getTok().isNot(AsmToken::RBrac)) {
   2872       Error(E, "']' expected");
   2873       return false;
   2874     }
   2875 
   2876     Parser.Lex(); // Eat right bracket token.
   2877 
   2878     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
   2879                                                          E, getContext()));
   2880   }
   2881 
   2882   return false;
   2883 }
   2884 
   2885 /// parseRegister - Parse a non-vector register operand.
   2886 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
   2887   MCAsmParser &Parser = getParser();
   2888   SMLoc S = getLoc();
   2889   // Try for a vector register.
   2890   if (!tryParseVectorRegister(Operands))
   2891     return false;
   2892 
   2893   // Try for a scalar register.
   2894   int64_t Reg = tryParseRegister();
   2895   if (Reg == -1)
   2896     return true;
   2897   Operands.push_back(
   2898       AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
   2899 
   2900   // A small number of instructions (FMOVXDhighr, for example) have "[1]"
   2901   // as a string token in the instruction itself.
   2902   if (getLexer().getKind() == AsmToken::LBrac) {
   2903     SMLoc LBracS = getLoc();
   2904     Parser.Lex();
   2905     const AsmToken &Tok = Parser.getTok();
   2906     if (Tok.is(AsmToken::Integer)) {
   2907       SMLoc IntS = getLoc();
   2908       int64_t Val = Tok.getIntVal();
   2909       if (Val == 1) {
   2910         Parser.Lex();
   2911         if (getLexer().getKind() == AsmToken::RBrac) {
   2912           SMLoc RBracS = getLoc();
   2913           Parser.Lex();
   2914           Operands.push_back(
   2915               AArch64Operand::CreateToken("[", false, LBracS, getContext()));
   2916           Operands.push_back(
   2917               AArch64Operand::CreateToken("1", false, IntS, getContext()));
   2918           Operands.push_back(
   2919               AArch64Operand::CreateToken("]", false, RBracS, getContext()));
   2920           return false;
   2921         }
   2922       }
   2923     }
   2924   }
   2925 
   2926   return false;
   2927 }
   2928 
   2929 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
   2930   MCAsmParser &Parser = getParser();
   2931   bool HasELFModifier = false;
   2932   AArch64MCExpr::VariantKind RefKind;
   2933 
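           // A symbolic immediate may be prefixed by a relocation specifier written as ":name:",
           // e.g. ":lo12:sym" or ":got:sym".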
   2934   if (Parser.getTok().is(AsmToken::Colon)) {
    2935     Parser.Lex(); // Eat ':'
   2936     HasELFModifier = true;
   2937 
   2938     if (Parser.getTok().isNot(AsmToken::Identifier)) {
   2939       Error(Parser.getTok().getLoc(),
   2940             "expect relocation specifier in operand after ':'");
   2941       return true;
   2942     }
   2943 
   2944     std::string LowerCase = Parser.getTok().getIdentifier().lower();
   2945     RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
   2946                   .Case("lo12", AArch64MCExpr::VK_LO12)
   2947                   .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
   2948                   .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
   2949                   .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
   2950                   .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
   2951                   .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
   2952                   .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
   2953                   .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
   2954                   .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
   2955                   .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
   2956                   .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
   2957                   .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
   2958                   .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
   2959                   .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
   2960                   .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
   2961                   .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
   2962                   .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
   2963                   .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
   2964                   .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
   2965                   .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
   2966                   .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
   2967                   .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
   2968                   .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
   2969                   .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
   2970                   .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
   2971                   .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
   2972                   .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
   2973                   .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
   2974                   .Case("got", AArch64MCExpr::VK_GOT_PAGE)
   2975                   .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
   2976                   .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
   2977                   .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
   2978                   .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
   2979                   .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
   2980                   .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
   2981                   .Default(AArch64MCExpr::VK_INVALID);
   2982 
   2983     if (RefKind == AArch64MCExpr::VK_INVALID) {
   2984       Error(Parser.getTok().getLoc(),
   2985             "expect relocation specifier in operand after ':'");
   2986       return true;
   2987     }
   2988 
   2989     Parser.Lex(); // Eat identifier
   2990 
   2991     if (Parser.getTok().isNot(AsmToken::Colon)) {
   2992       Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
   2993       return true;
   2994     }
   2995     Parser.Lex(); // Eat ':'
   2996   }
   2997 
   2998   if (getParser().parseExpression(ImmVal))
   2999     return true;
   3000 
   3001   if (HasELFModifier)
   3002     ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
   3003 
   3004   return false;
   3005 }
   3006 
   3007 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
   3008 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
   3009   MCAsmParser &Parser = getParser();
    3010   assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Brace");
    3011   SMLoc S = getLoc();
    3012   Parser.Lex(); // Eat the '{' token.
   3013   StringRef Kind;
   3014   int64_t FirstReg = tryMatchVectorRegister(Kind, true);
   3015   if (FirstReg == -1)
   3016     return true;
   3017   int64_t PrevReg = FirstReg;
   3018   unsigned Count = 1;
   3019 
   3020   if (Parser.getTok().is(AsmToken::Minus)) {
   3021     Parser.Lex(); // Eat the minus.
   3022 
   3023     SMLoc Loc = getLoc();
   3024     StringRef NextKind;
   3025     int64_t Reg = tryMatchVectorRegister(NextKind, true);
   3026     if (Reg == -1)
   3027       return true;
   3028     // Any Kind suffices must match on all regs in the list.
    3029     // Any kind suffix must match on all registers in the list.
   3030       return Error(Loc, "mismatched register size suffix");
   3031 
   3032     unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
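             // Number of registers covered by the range. Register numbers wrap mod 32, so a
             // range such as { v31.4s - v1.4s } is accepted.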
   3033 
   3034     if (Space == 0 || Space > 3) {
   3035       return Error(Loc, "invalid number of vectors");
   3036     }
   3037 
   3038     Count += Space;
   3039   }
   3040   else {
   3041     while (Parser.getTok().is(AsmToken::Comma)) {
   3042       Parser.Lex(); // Eat the comma token.
   3043 
   3044       SMLoc Loc = getLoc();
   3045       StringRef NextKind;
   3046       int64_t Reg = tryMatchVectorRegister(NextKind, true);
   3047       if (Reg == -1)
   3048         return true;
   3049       // Any Kind suffices must match on all regs in the list.
    3050       // Any kind suffix must match on all registers in the list.
   3051         return Error(Loc, "mismatched register size suffix");
   3052 
    3053       // Registers must be consecutive (incrementing by one, wrapping from 31 to 0).
   3054       if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
   3055           (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
   3056        return Error(Loc, "registers must be sequential");
   3057 
   3058       PrevReg = Reg;
   3059       ++Count;
   3060     }
   3061   }
   3062 
   3063   if (Parser.getTok().isNot(AsmToken::RCurly))
   3064     return Error(getLoc(), "'}' expected");
   3065   Parser.Lex(); // Eat the '}' token.
   3066 
   3067   if (Count > 4)
   3068     return Error(S, "invalid number of vectors");
   3069 
   3070   unsigned NumElements = 0;
   3071   char ElementKind = 0;
   3072   if (!Kind.empty())
   3073     parseValidVectorKind(Kind, NumElements, ElementKind);
   3074 
   3075   Operands.push_back(AArch64Operand::CreateVectorList(
   3076       FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
   3077 
   3078   // If there is an index specifier following the list, parse that too.
   3079   if (Parser.getTok().is(AsmToken::LBrac)) {
   3080     SMLoc SIdx = getLoc();
   3081     Parser.Lex(); // Eat left bracket token.
   3082 
   3083     const MCExpr *ImmVal;
   3084     if (getParser().parseExpression(ImmVal))
   3085       return false;
   3086     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   3087     if (!MCE) {
   3088       TokError("immediate value expected for vector index");
   3089       return false;
   3090     }
   3091 
   3092     SMLoc E = getLoc();
   3093     if (Parser.getTok().isNot(AsmToken::RBrac)) {
   3094       Error(E, "']' expected");
   3095       return false;
   3096     }
   3097 
   3098     Parser.Lex(); // Eat right bracket token.
   3099 
   3100     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
   3101                                                          E, getContext()));
   3102   }
   3103   return false;
   3104 }
   3105 
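         /// tryParseGPR64sp0Operand - Parse a GPR64 register or SP, optionally followed by
         /// ", #0". The "#0" is validated but not kept as a separate operand.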
   3106 AArch64AsmParser::OperandMatchResultTy
   3107 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
   3108   MCAsmParser &Parser = getParser();
   3109   const AsmToken &Tok = Parser.getTok();
   3110   if (!Tok.is(AsmToken::Identifier))
   3111     return MatchOperand_NoMatch;
   3112 
   3113   unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
   3114 
   3115   MCContext &Ctx = getContext();
   3116   const MCRegisterInfo *RI = Ctx.getRegisterInfo();
   3117   if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
   3118     return MatchOperand_NoMatch;
   3119 
   3120   SMLoc S = getLoc();
   3121   Parser.Lex(); // Eat register
   3122 
   3123   if (Parser.getTok().isNot(AsmToken::Comma)) {
   3124     Operands.push_back(
   3125         AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
   3126     return MatchOperand_Success;
   3127   }
   3128   Parser.Lex(); // Eat comma.
   3129 
   3130   if (Parser.getTok().is(AsmToken::Hash))
   3131     Parser.Lex(); // Eat hash
   3132 
   3133   if (Parser.getTok().isNot(AsmToken::Integer)) {
   3134     Error(getLoc(), "index must be absent or #0");
   3135     return MatchOperand_ParseFail;
   3136   }
   3137 
   3138   const MCExpr *ImmVal;
   3139   if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
   3140       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
   3141     Error(getLoc(), "index must be absent or #0");
   3142     return MatchOperand_ParseFail;
   3143   }
   3144 
   3145   Operands.push_back(
   3146       AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
   3147   return MatchOperand_Success;
   3148 }
   3149 
    3150 /// parseOperand - Parse an AArch64 instruction operand.  For now this parses the
   3151 /// operand regardless of the mnemonic.
   3152 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
   3153                                   bool invertCondCode) {
   3154   MCAsmParser &Parser = getParser();
   3155   // Check if the current operand has a custom associated parser, if so, try to
   3156   // custom parse the operand, or fallback to the general approach.
   3157   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
   3158   if (ResTy == MatchOperand_Success)
   3159     return false;
   3160   // If there wasn't a custom match, try the generic matcher below. Otherwise,
   3161   // there was a match, but an error occurred, in which case, just return that
   3162   // the operand parsing failed.
   3163   if (ResTy == MatchOperand_ParseFail)
   3164     return true;
   3165 
   3166   // Nothing custom, so do general case parsing.
   3167   SMLoc S, E;
   3168   switch (getLexer().getKind()) {
   3169   default: {
   3170     SMLoc S = getLoc();
   3171     const MCExpr *Expr;
   3172     if (parseSymbolicImmVal(Expr))
   3173       return Error(S, "invalid operand");
   3174 
   3175     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   3176     Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
   3177     return false;
   3178   }
   3179   case AsmToken::LBrac: {
   3180     SMLoc Loc = Parser.getTok().getLoc();
   3181     Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
   3182                                                    getContext()));
   3183     Parser.Lex(); // Eat '['
   3184 
   3185     // There's no comma after a '[', so we can parse the next operand
   3186     // immediately.
   3187     return parseOperand(Operands, false, false);
   3188   }
   3189   case AsmToken::LCurly:
   3190     return parseVectorList(Operands);
   3191   case AsmToken::Identifier: {
   3192     // If we're expecting a Condition Code operand, then just parse that.
   3193     if (isCondCode)
   3194       return parseCondCode(Operands, invertCondCode);
   3195 
   3196     // If it's a register name, parse it.
   3197     if (!parseRegister(Operands))
   3198       return false;
   3199 
   3200     // This could be an optional "shift" or "extend" operand.
   3201     OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
   3202     // We can only continue if no tokens were eaten.
   3203     if (GotShift != MatchOperand_NoMatch)
   3204       return GotShift;
   3205 
   3206     // This was not a register so parse other operands that start with an
   3207     // identifier (like labels) as expressions and create them as immediates.
   3208     const MCExpr *IdVal;
   3209     S = getLoc();
   3210     if (getParser().parseExpression(IdVal))
   3211       return true;
   3212 
   3213     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   3214     Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
   3215     return false;
   3216   }
   3217   case AsmToken::Integer:
   3218   case AsmToken::Real:
   3219   case AsmToken::Hash: {
   3220     // #42 -> immediate.
   3221     S = getLoc();
   3222     if (getLexer().is(AsmToken::Hash))
   3223       Parser.Lex();
   3224 
   3225     // Parse a negative sign
   3226     bool isNegative = false;
   3227     if (Parser.getTok().is(AsmToken::Minus)) {
   3228       isNegative = true;
   3229       // We need to consume this token only when we have a Real, otherwise
   3230       // we let parseSymbolicImmVal take care of it
   3231       if (Parser.getLexer().peekTok().is(AsmToken::Real))
   3232         Parser.Lex();
   3233     }
   3234 
   3235     // The only Real that should come through here is a literal #0.0 for
   3236     // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
   3237     // so convert the value.
   3238     const AsmToken &Tok = Parser.getTok();
   3239     if (Tok.is(AsmToken::Real)) {
   3240       APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
   3241       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   3242       if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
   3243           Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
   3244           Mnemonic != "fcmlt")
   3245         return TokError("unexpected floating point literal");
   3246       else if (IntVal != 0 || isNegative)
   3247         return TokError("expected floating-point constant #0.0");
   3248       Parser.Lex(); // Eat the token.
   3249 
   3250       Operands.push_back(
   3251           AArch64Operand::CreateToken("#0", false, S, getContext()));
   3252       Operands.push_back(
   3253           AArch64Operand::CreateToken(".0", false, S, getContext()));
   3254       return false;
   3255     }
   3256 
   3257     const MCExpr *ImmVal;
   3258     if (parseSymbolicImmVal(ImmVal))
   3259       return true;
   3260 
   3261     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   3262     Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
   3263     return false;
   3264   }
   3265   case AsmToken::Equal: {
   3266     SMLoc Loc = Parser.getTok().getLoc();
    3267     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
   3268       return Error(Loc, "unexpected token in operand");
   3269     Parser.Lex(); // Eat '='
   3270     const MCExpr *SubExprVal;
   3271     if (getParser().parseExpression(SubExprVal))
   3272       return true;
   3273 
   3274     if (Operands.size() < 2 ||
   3275         !static_cast<AArch64Operand &>(*Operands[1]).isReg())
   3276       return Error(Loc, "Only valid when first operand is register");
   3277 
   3278     bool IsXReg =
   3279         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3280             Operands[1]->getReg());
   3281 
   3282     MCContext& Ctx = getContext();
   3283     E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    3284     // If the operand is an immediate that can fit in a MOVZ, replace the LDR with a MOVZ.
   3285     if (isa<MCConstantExpr>(SubExprVal)) {
   3286       uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
   3287       uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
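               // Reduce the immediate to a 16-bit value plus an LSL amount of 0, 16, 32 or 48
               // so that it can be encoded directly by a MOVZ.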
   3288       while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
   3289         ShiftAmt += 16;
   3290         Imm >>= 16;
   3291       }
   3292       if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
    3293         Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
    3294         Operands.push_back(AArch64Operand::CreateImm(
    3295             MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
   3296         if (ShiftAmt)
   3297           Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
   3298                      ShiftAmt, true, S, E, Ctx));
   3299         return false;
   3300       }
   3301       APInt Simm = APInt(64, Imm << ShiftAmt);
   3302       // check if the immediate is an unsigned or signed 32-bit int for W regs
   3303       if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
   3304         return Error(Loc, "Immediate too large for register");
   3305     }
    3306     // If it is a label or an immediate that cannot fit in a MOVZ, put it in the constant pool.
   3307     const MCExpr *CPLoc =
   3308         getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
   3309     Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
   3310     return false;
   3311   }
   3312   }
   3313 }
   3314 
   3315 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
   3316 /// operands.
   3317 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
   3318                                         StringRef Name, SMLoc NameLoc,
   3319                                         OperandVector &Operands) {
   3320   MCAsmParser &Parser = getParser();
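           // Accept the un-dotted conditional branch aliases (e.g. "beq") by canonicalizing
           // them to the "b.<cond>" form before matching.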
   3321   Name = StringSwitch<StringRef>(Name.lower())
   3322              .Case("beq", "b.eq")
   3323              .Case("bne", "b.ne")
   3324              .Case("bhs", "b.hs")
   3325              .Case("bcs", "b.cs")
   3326              .Case("blo", "b.lo")
   3327              .Case("bcc", "b.cc")
   3328              .Case("bmi", "b.mi")
   3329              .Case("bpl", "b.pl")
   3330              .Case("bvs", "b.vs")
   3331              .Case("bvc", "b.vc")
   3332              .Case("bhi", "b.hi")
   3333              .Case("bls", "b.ls")
   3334              .Case("bge", "b.ge")
   3335              .Case("blt", "b.lt")
   3336              .Case("bgt", "b.gt")
   3337              .Case("ble", "b.le")
   3338              .Case("bal", "b.al")
   3339              .Case("bnv", "b.nv")
   3340              .Default(Name);
   3341 
   3342   // First check for the AArch64-specific .req directive.
   3343   if (Parser.getTok().is(AsmToken::Identifier) &&
   3344       Parser.getTok().getIdentifier() == ".req") {
   3345     parseDirectiveReq(Name, NameLoc);
   3346     // We always return 'error' for this, as we're done with this
    3347     // statement and don't need to match the instruction.
   3348     return true;
   3349   }
   3350 
   3351   // Create the leading tokens for the mnemonic, split by '.' characters.
   3352   size_t Start = 0, Next = Name.find('.');
   3353   StringRef Head = Name.slice(Start, Next);
   3354 
   3355   // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
   3356   if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
   3357     bool IsError = parseSysAlias(Head, NameLoc, Operands);
   3358     if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
   3359       Parser.eatToEndOfStatement();
   3360     return IsError;
   3361   }
   3362 
   3363   Operands.push_back(
   3364       AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
   3365   Mnemonic = Head;
   3366 
   3367   // Handle condition codes for a branch mnemonic
   3368   if (Head == "b" && Next != StringRef::npos) {
   3369     Start = Next;
   3370     Next = Name.find('.', Start + 1);
   3371     Head = Name.slice(Start + 1, Next);
   3372 
   3373     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
   3374                                             (Head.data() - Name.data()));
   3375     AArch64CC::CondCode CC = parseCondCodeString(Head);
   3376     if (CC == AArch64CC::Invalid)
   3377       return Error(SuffixLoc, "invalid condition code");
   3378     Operands.push_back(
   3379         AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
   3380     Operands.push_back(
   3381         AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
   3382   }
   3383 
   3384   // Add the remaining tokens in the mnemonic.
   3385   while (Next != StringRef::npos) {
   3386     Start = Next;
   3387     Next = Name.find('.', Start + 1);
   3388     Head = Name.slice(Start, Next);
   3389     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
   3390                                             (Head.data() - Name.data()) + 1);
   3391     Operands.push_back(
   3392         AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
   3393   }
   3394 
   3395   // Conditional compare instructions have a Condition Code operand, which needs
   3396   // to be parsed and an immediate operand created.
   3397   bool condCodeFourthOperand =
   3398       (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
   3399        Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
   3400        Head == "csinc" || Head == "csinv" || Head == "csneg");
   3401 
   3402   // These instructions are aliases to some of the conditional select
   3403   // instructions. However, the condition code is inverted in the aliased
   3404   // instruction.
   3405   //
   3406   // FIXME: Is this the correct way to handle these? Or should the parser
   3407   //        generate the aliased instructions directly?
   3408   bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
   3409   bool condCodeThirdOperand =
   3410       (Head == "cinc" || Head == "cinv" || Head == "cneg");
   3411 
   3412   // Read the remaining operands.
   3413   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   3414     // Read the first operand.
   3415     if (parseOperand(Operands, false, false)) {
   3416       Parser.eatToEndOfStatement();
   3417       return true;
   3418     }
   3419 
   3420     unsigned N = 2;
   3421     while (getLexer().is(AsmToken::Comma)) {
   3422       Parser.Lex(); // Eat the comma.
   3423 
   3424       // Parse and remember the operand.
   3425       if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
   3426                                      (N == 3 && condCodeThirdOperand) ||
   3427                                      (N == 2 && condCodeSecondOperand),
   3428                        condCodeSecondOperand || condCodeThirdOperand)) {
   3429         Parser.eatToEndOfStatement();
   3430         return true;
   3431       }
   3432 
   3433       // After successfully parsing some operands there are two special cases to
   3434       // consider (i.e. notional operands not separated by commas). Both are due
   3435       // to memory specifiers:
   3436       //  + An RBrac will end an address for load/store/prefetch
   3437       //  + An '!' will indicate a pre-indexed operation.
   3438       //
   3439       // It's someone else's responsibility to make sure these tokens are sane
   3440       // in the given context!
   3441       if (Parser.getTok().is(AsmToken::RBrac)) {
   3442         SMLoc Loc = Parser.getTok().getLoc();
   3443         Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
   3444                                                        getContext()));
   3445         Parser.Lex();
   3446       }
   3447 
   3448       if (Parser.getTok().is(AsmToken::Exclaim)) {
   3449         SMLoc Loc = Parser.getTok().getLoc();
   3450         Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
   3451                                                        getContext()));
   3452         Parser.Lex();
   3453       }
   3454 
   3455       ++N;
   3456     }
   3457   }
   3458 
   3459   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   3460     SMLoc Loc = Parser.getTok().getLoc();
   3461     Parser.eatToEndOfStatement();
   3462     return Error(Loc, "unexpected token in argument list");
   3463   }
   3464 
   3465   Parser.Lex(); // Consume the EndOfStatement
   3466   return false;
   3467 }
   3468 
   3469 // FIXME: This entire function is a giant hack to provide us with decent
   3470 // operand range validation/diagnostics until TableGen/MC can be extended
   3471 // to support autogeneration of this kind of validation.
   3472 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
   3473                                          SmallVectorImpl<SMLoc> &Loc) {
   3474   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   3475   // Check for indexed addressing modes w/ the base register being the
   3476   // same as a destination/source register or pair load where
    3477   // the Rt == Rt2. All of those are unpredictable.
   3478   switch (Inst.getOpcode()) {
   3479   case AArch64::LDPSWpre:
   3480   case AArch64::LDPWpost:
   3481   case AArch64::LDPWpre:
   3482   case AArch64::LDPXpost:
   3483   case AArch64::LDPXpre: {
   3484     unsigned Rt = Inst.getOperand(1).getReg();
   3485     unsigned Rt2 = Inst.getOperand(2).getReg();
   3486     unsigned Rn = Inst.getOperand(3).getReg();
   3487     if (RI->isSubRegisterEq(Rn, Rt))
   3488       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
   3489                            "is also a destination");
   3490     if (RI->isSubRegisterEq(Rn, Rt2))
   3491       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
   3492                            "is also a destination");
   3493     // FALLTHROUGH
   3494   }
   3495   case AArch64::LDPDi:
   3496   case AArch64::LDPQi:
   3497   case AArch64::LDPSi:
   3498   case AArch64::LDPSWi:
   3499   case AArch64::LDPWi:
   3500   case AArch64::LDPXi: {
   3501     unsigned Rt = Inst.getOperand(0).getReg();
   3502     unsigned Rt2 = Inst.getOperand(1).getReg();
   3503     if (Rt == Rt2)
   3504       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
   3505     break;
   3506   }
   3507   case AArch64::LDPDpost:
   3508   case AArch64::LDPDpre:
   3509   case AArch64::LDPQpost:
   3510   case AArch64::LDPQpre:
   3511   case AArch64::LDPSpost:
   3512   case AArch64::LDPSpre:
   3513   case AArch64::LDPSWpost: {
   3514     unsigned Rt = Inst.getOperand(1).getReg();
   3515     unsigned Rt2 = Inst.getOperand(2).getReg();
   3516     if (Rt == Rt2)
   3517       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
   3518     break;
   3519   }
   3520   case AArch64::STPDpost:
   3521   case AArch64::STPDpre:
   3522   case AArch64::STPQpost:
   3523   case AArch64::STPQpre:
   3524   case AArch64::STPSpost:
   3525   case AArch64::STPSpre:
   3526   case AArch64::STPWpost:
   3527   case AArch64::STPWpre:
   3528   case AArch64::STPXpost:
   3529   case AArch64::STPXpre: {
   3530     unsigned Rt = Inst.getOperand(1).getReg();
   3531     unsigned Rt2 = Inst.getOperand(2).getReg();
   3532     unsigned Rn = Inst.getOperand(3).getReg();
   3533     if (RI->isSubRegisterEq(Rn, Rt))
   3534       return Error(Loc[0], "unpredictable STP instruction, writeback base "
   3535                            "is also a source");
   3536     if (RI->isSubRegisterEq(Rn, Rt2))
   3537       return Error(Loc[1], "unpredictable STP instruction, writeback base "
   3538                            "is also a source");
   3539     break;
   3540   }
   3541   case AArch64::LDRBBpre:
   3542   case AArch64::LDRBpre:
   3543   case AArch64::LDRHHpre:
   3544   case AArch64::LDRHpre:
   3545   case AArch64::LDRSBWpre:
   3546   case AArch64::LDRSBXpre:
   3547   case AArch64::LDRSHWpre:
   3548   case AArch64::LDRSHXpre:
   3549   case AArch64::LDRSWpre:
   3550   case AArch64::LDRWpre:
   3551   case AArch64::LDRXpre:
   3552   case AArch64::LDRBBpost:
   3553   case AArch64::LDRBpost:
   3554   case AArch64::LDRHHpost:
   3555   case AArch64::LDRHpost:
   3556   case AArch64::LDRSBWpost:
   3557   case AArch64::LDRSBXpost:
   3558   case AArch64::LDRSHWpost:
   3559   case AArch64::LDRSHXpost:
   3560   case AArch64::LDRSWpost:
   3561   case AArch64::LDRWpost:
   3562   case AArch64::LDRXpost: {
   3563     unsigned Rt = Inst.getOperand(1).getReg();
   3564     unsigned Rn = Inst.getOperand(2).getReg();
   3565     if (RI->isSubRegisterEq(Rn, Rt))
   3566       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
   3567                            "is also a source");
   3568     break;
   3569   }
   3570   case AArch64::STRBBpost:
   3571   case AArch64::STRBpost:
   3572   case AArch64::STRHHpost:
   3573   case AArch64::STRHpost:
   3574   case AArch64::STRWpost:
   3575   case AArch64::STRXpost:
   3576   case AArch64::STRBBpre:
   3577   case AArch64::STRBpre:
   3578   case AArch64::STRHHpre:
   3579   case AArch64::STRHpre:
   3580   case AArch64::STRWpre:
   3581   case AArch64::STRXpre: {
   3582     unsigned Rt = Inst.getOperand(1).getReg();
   3583     unsigned Rn = Inst.getOperand(2).getReg();
   3584     if (RI->isSubRegisterEq(Rn, Rt))
   3585       return Error(Loc[0], "unpredictable STR instruction, writeback base "
   3586                            "is also a source");
   3587     break;
   3588   }
   3589   }
   3590 
   3591   // Now check immediate ranges. Separate from the above as there is overlap
   3592   // in the instructions being checked and this keeps the nested conditionals
   3593   // to a minimum.
   3594   switch (Inst.getOpcode()) {
   3595   case AArch64::ADDSWri:
   3596   case AArch64::ADDSXri:
   3597   case AArch64::ADDWri:
   3598   case AArch64::ADDXri:
   3599   case AArch64::SUBSWri:
   3600   case AArch64::SUBSXri:
   3601   case AArch64::SUBWri:
   3602   case AArch64::SUBXri: {
   3603     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
   3604     // some slight duplication here.
   3605     if (Inst.getOperand(2).isExpr()) {
   3606       const MCExpr *Expr = Inst.getOperand(2).getExpr();
   3607       AArch64MCExpr::VariantKind ELFRefKind;
   3608       MCSymbolRefExpr::VariantKind DarwinRefKind;
   3609       int64_t Addend;
   3610       if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
   3611         return Error(Loc[2], "invalid immediate expression");
   3612       }
   3613 
   3614       // Only allow these with ADDXri.
   3615       if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
   3616           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
   3617           Inst.getOpcode() == AArch64::ADDXri)
   3618         return false;
   3619 
   3620       // Only allow these with ADDXri/ADDWri
   3621       if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
   3622           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
   3623           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
   3624           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
   3625           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
   3626           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
   3627           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
   3628           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
   3629           (Inst.getOpcode() == AArch64::ADDXri ||
   3630           Inst.getOpcode() == AArch64::ADDWri))
   3631         return false;
   3632 
   3633       // Don't allow expressions in the immediate field otherwise
   3634       return Error(Loc[2], "invalid immediate expression");
   3635     }
   3636     return false;
   3637   }
   3638   default:
   3639     return false;
   3640   }
   3641 }
   3642 
   3643 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
   3644   switch (ErrCode) {
   3645   case Match_MissingFeature:
   3646     return Error(Loc,
   3647                  "instruction requires a CPU feature not currently enabled");
   3648   case Match_InvalidOperand:
   3649     return Error(Loc, "invalid operand for instruction");
   3650   case Match_InvalidSuffix:
   3651     return Error(Loc, "invalid type suffix for instruction");
   3652   case Match_InvalidCondCode:
   3653     return Error(Loc, "expected AArch64 condition code");
   3654   case Match_AddSubRegExtendSmall:
   3655     return Error(Loc,
   3656       "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
   3657   case Match_AddSubRegExtendLarge:
   3658     return Error(Loc,
   3659       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
   3660   case Match_AddSubSecondSource:
   3661     return Error(Loc,
   3662       "expected compatible register, symbol or integer in range [0, 4095]");
   3663   case Match_LogicalSecondSource:
   3664     return Error(Loc, "expected compatible register or logical immediate");
   3665   case Match_InvalidMovImm32Shift:
   3666     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
   3667   case Match_InvalidMovImm64Shift:
   3668     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
   3669   case Match_AddSubRegShift32:
   3670     return Error(Loc,
   3671        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
   3672   case Match_AddSubRegShift64:
   3673     return Error(Loc,
   3674        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
   3675   case Match_InvalidFPImm:
   3676     return Error(Loc,
   3677                  "expected compatible register or floating-point constant");
   3678   case Match_InvalidMemoryIndexedSImm9:
   3679     return Error(Loc, "index must be an integer in range [-256, 255].");
   3680   case Match_InvalidMemoryIndexed4SImm7:
   3681     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
   3682   case Match_InvalidMemoryIndexed8SImm7:
   3683     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
   3684   case Match_InvalidMemoryIndexed16SImm7:
   3685     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
   3686   case Match_InvalidMemoryWExtend8:
   3687     return Error(Loc,
   3688                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
   3689   case Match_InvalidMemoryWExtend16:
   3690     return Error(Loc,
   3691                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
   3692   case Match_InvalidMemoryWExtend32:
   3693     return Error(Loc,
   3694                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
   3695   case Match_InvalidMemoryWExtend64:
   3696     return Error(Loc,
   3697                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
   3698   case Match_InvalidMemoryWExtend128:
   3699     return Error(Loc,
   3700                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
   3701   case Match_InvalidMemoryXExtend8:
   3702     return Error(Loc,
   3703                  "expected 'lsl' or 'sxtx' with optional shift of #0");
   3704   case Match_InvalidMemoryXExtend16:
   3705     return Error(Loc,
   3706                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
   3707   case Match_InvalidMemoryXExtend32:
   3708     return Error(Loc,
   3709                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
   3710   case Match_InvalidMemoryXExtend64:
   3711     return Error(Loc,
   3712                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
   3713   case Match_InvalidMemoryXExtend128:
   3714     return Error(Loc,
   3715                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
   3716   case Match_InvalidMemoryIndexed1:
   3717     return Error(Loc, "index must be an integer in range [0, 4095].");
   3718   case Match_InvalidMemoryIndexed2:
   3719     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
   3720   case Match_InvalidMemoryIndexed4:
   3721     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
   3722   case Match_InvalidMemoryIndexed8:
   3723     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
   3724   case Match_InvalidMemoryIndexed16:
   3725     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
   3726   case Match_InvalidImm0_1:
   3727     return Error(Loc, "immediate must be an integer in range [0, 1].");
   3728   case Match_InvalidImm0_7:
   3729     return Error(Loc, "immediate must be an integer in range [0, 7].");
   3730   case Match_InvalidImm0_15:
   3731     return Error(Loc, "immediate must be an integer in range [0, 15].");
   3732   case Match_InvalidImm0_31:
   3733     return Error(Loc, "immediate must be an integer in range [0, 31].");
   3734   case Match_InvalidImm0_63:
   3735     return Error(Loc, "immediate must be an integer in range [0, 63].");
   3736   case Match_InvalidImm0_127:
   3737     return Error(Loc, "immediate must be an integer in range [0, 127].");
   3738   case Match_InvalidImm0_65535:
   3739     return Error(Loc, "immediate must be an integer in range [0, 65535].");
   3740   case Match_InvalidImm1_8:
   3741     return Error(Loc, "immediate must be an integer in range [1, 8].");
   3742   case Match_InvalidImm1_16:
   3743     return Error(Loc, "immediate must be an integer in range [1, 16].");
   3744   case Match_InvalidImm1_32:
   3745     return Error(Loc, "immediate must be an integer in range [1, 32].");
   3746   case Match_InvalidImm1_64:
   3747     return Error(Loc, "immediate must be an integer in range [1, 64].");
   3748   case Match_InvalidIndex1:
   3749     return Error(Loc, "expected lane specifier '[1]'");
   3750   case Match_InvalidIndexB:
   3751     return Error(Loc, "vector lane must be an integer in range [0, 15].");
   3752   case Match_InvalidIndexH:
   3753     return Error(Loc, "vector lane must be an integer in range [0, 7].");
   3754   case Match_InvalidIndexS:
   3755     return Error(Loc, "vector lane must be an integer in range [0, 3].");
   3756   case Match_InvalidIndexD:
   3757     return Error(Loc, "vector lane must be an integer in range [0, 1].");
   3758   case Match_InvalidLabel:
   3759     return Error(Loc, "expected label or encodable integer pc offset");
   3760   case Match_MRS:
   3761     return Error(Loc, "expected readable system register");
   3762   case Match_MSR:
   3763     return Error(Loc, "expected writable system register or pstate");
   3764   case Match_MnemonicFail:
   3765     return Error(Loc, "unrecognized instruction mnemonic");
   3766   default:
   3767     llvm_unreachable("unexpected error code!");
   3768   }
   3769 }
   3770 
   3771 static const char *getSubtargetFeatureName(uint64_t Val);
   3772 
   3773 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   3774                                                OperandVector &Operands,
   3775                                                MCStreamer &Out,
   3776                                                uint64_t &ErrorInfo,
   3777                                                bool MatchingInlineAsm) {
   3778   assert(!Operands.empty() && "Unexpect empty operand list!");
   3779   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
   3780   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
   3781 
   3782   StringRef Tok = Op.getToken();
   3783   unsigned NumOperands = Operands.size();
   3784 
   3785   if (NumOperands == 4 && Tok == "lsl") {
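           // LSL-by-immediate is an alias of UBFM: "lsl Rd, Rn, #shift" is
           // "ubfm Rd, Rn, #((regwidth - shift) % regwidth), #(regwidth - 1 - shift)", so
           // rewrite the operands into the UBFM form the matcher understands.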
   3786     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
   3787     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
   3788     if (Op2.isReg() && Op3.isImm()) {
   3789       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
   3790       if (Op3CE) {
   3791         uint64_t Op3Val = Op3CE->getValue();
   3792         uint64_t NewOp3Val = 0;
   3793         uint64_t NewOp4Val = 0;
   3794         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
   3795                 Op2.getReg())) {
   3796           NewOp3Val = (32 - Op3Val) & 0x1f;
   3797           NewOp4Val = 31 - Op3Val;
   3798         } else {
   3799           NewOp3Val = (64 - Op3Val) & 0x3f;
   3800           NewOp4Val = 63 - Op3Val;
   3801         }
   3802 
   3803         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
   3804         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
   3805 
   3806         Operands[0] = AArch64Operand::CreateToken(
   3807             "ubfm", false, Op.getStartLoc(), getContext());
   3808         Operands.push_back(AArch64Operand::CreateImm(
   3809             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
   3810         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
   3811                                                 Op3.getEndLoc(), getContext());
   3812       }
   3813     }
   3814   } else if (NumOperands == 4 && Tok == "bfc") {
   3815     // FIXME: Horrible hack to handle BFC->BFM alias.
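             // "bfc Rd, #lsb, #width" is encoded as
             // "bfm Rd, WZR/XZR, #((regwidth - lsb) % regwidth), #(width - 1)".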
   3816     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
   3817     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
   3818     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
   3819 
   3820     if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
   3821       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
   3822       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
   3823 
   3824       if (LSBCE && WidthCE) {
   3825         uint64_t LSB = LSBCE->getValue();
   3826         uint64_t Width = WidthCE->getValue();
   3827 
   3828         uint64_t RegWidth = 0;
   3829         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3830                 Op1.getReg()))
   3831           RegWidth = 64;
   3832         else
   3833           RegWidth = 32;
   3834 
   3835         if (LSB >= RegWidth)
   3836           return Error(LSBOp.getStartLoc(),
   3837                        "expected integer in range [0, 31]");
   3838         if (Width < 1 || Width > RegWidth)
   3839           return Error(WidthOp.getStartLoc(),
   3840                        "expected integer in range [1, 32]");
   3841 
   3842         uint64_t ImmR = 0;
   3843         if (RegWidth == 32)
   3844           ImmR = (32 - LSB) & 0x1f;
   3845         else
   3846           ImmR = (64 - LSB) & 0x3f;
   3847 
   3848         uint64_t ImmS = Width - 1;
   3849 
   3850         if (ImmR != 0 && ImmS >= ImmR)
   3851           return Error(WidthOp.getStartLoc(),
   3852                        "requested insert overflows register");
   3853 
   3854         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
   3855         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
   3856         Operands[0] = AArch64Operand::CreateToken(
   3857               "bfm", false, Op.getStartLoc(), getContext());
   3858         Operands[2] = AArch64Operand::CreateReg(
   3859             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
   3860             SMLoc(), getContext());
   3861         Operands[3] = AArch64Operand::CreateImm(
   3862             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
   3863         Operands.emplace_back(
   3864             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
   3865                                       WidthOp.getEndLoc(), getContext()));
   3866       }
   3867     }
   3868   } else if (NumOperands == 5) {
   3869     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
   3870     // UBFIZ -> UBFM aliases.
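             // "bfi/sbfiz/ubfiz Rd, Rn, #lsb, #width" becomes
             // "bfm/sbfm/ubfm Rd, Rn, #((regwidth - lsb) % regwidth), #(width - 1)".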
   3871     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
   3872       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
   3873       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
   3874       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
   3875 
   3876       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
   3877         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
   3878         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
   3879 
   3880         if (Op3CE && Op4CE) {
   3881           uint64_t Op3Val = Op3CE->getValue();
   3882           uint64_t Op4Val = Op4CE->getValue();
   3883 
   3884           uint64_t RegWidth = 0;
   3885           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3886                   Op1.getReg()))
   3887             RegWidth = 64;
   3888           else
   3889             RegWidth = 32;
   3890 
   3891           if (Op3Val >= RegWidth)
   3892             return Error(Op3.getStartLoc(),
   3893                          "expected integer in range [0, 31]");
   3894           if (Op4Val < 1 || Op4Val > RegWidth)
   3895             return Error(Op4.getStartLoc(),
   3896                          "expected integer in range [1, 32]");
   3897 
   3898           uint64_t NewOp3Val = 0;
   3899           if (RegWidth == 32)
   3900             NewOp3Val = (32 - Op3Val) & 0x1f;
   3901           else
   3902             NewOp3Val = (64 - Op3Val) & 0x3f;
   3903 
   3904           uint64_t NewOp4Val = Op4Val - 1;
   3905 
   3906           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
   3907             return Error(Op4.getStartLoc(),
   3908                          "requested insert overflows register");
   3909 
   3910           const MCExpr *NewOp3 =
   3911               MCConstantExpr::create(NewOp3Val, getContext());
   3912           const MCExpr *NewOp4 =
   3913               MCConstantExpr::create(NewOp4Val, getContext());
   3914           Operands[3] = AArch64Operand::CreateImm(
   3915               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
   3916           Operands[4] = AArch64Operand::CreateImm(
   3917               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
   3918           if (Tok == "bfi")
   3919             Operands[0] = AArch64Operand::CreateToken(
   3920                 "bfm", false, Op.getStartLoc(), getContext());
   3921           else if (Tok == "sbfiz")
   3922             Operands[0] = AArch64Operand::CreateToken(
   3923                 "sbfm", false, Op.getStartLoc(), getContext());
   3924           else if (Tok == "ubfiz")
   3925             Operands[0] = AArch64Operand::CreateToken(
   3926                 "ubfm", false, Op.getStartLoc(), getContext());
   3927           else
   3928             llvm_unreachable("No valid mnemonic for alias?");
   3929         }
   3930       }
   3931 
   3932       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
   3933       // UBFX -> UBFM aliases.
   3934     } else if (NumOperands == 5 &&
   3935                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
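               // "bfxil/sbfx/ubfx Rd, Rn, #lsb, #width" becomes
               // "bfm/sbfm/ubfm Rd, Rn, #lsb, #(lsb + width - 1)".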
   3936       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
   3937       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
   3938       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
   3939 
   3940       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
   3941         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
   3942         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
   3943 
   3944         if (Op3CE && Op4CE) {
   3945           uint64_t Op3Val = Op3CE->getValue();
   3946           uint64_t Op4Val = Op4CE->getValue();
   3947 
   3948           uint64_t RegWidth = 0;
   3949           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3950                   Op1.getReg()))
   3951             RegWidth = 64;
   3952           else
   3953             RegWidth = 32;
   3954 
   3955           if (Op3Val >= RegWidth)
   3956             return Error(Op3.getStartLoc(),
   3957                          "expected integer in range [0, 31]");
   3958           if (Op4Val < 1 || Op4Val > RegWidth)
   3959             return Error(Op4.getStartLoc(),
   3960                          "expected integer in range [1, 32]");
   3961 
   3962           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
   3963 
   3964           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
   3965             return Error(Op4.getStartLoc(),
   3966                          "requested extract overflows register");
   3967 
   3968           const MCExpr *NewOp4 =
   3969               MCConstantExpr::create(NewOp4Val, getContext());
   3970           Operands[4] = AArch64Operand::CreateImm(
   3971               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
   3972           if (Tok == "bfxil")
   3973             Operands[0] = AArch64Operand::CreateToken(
   3974                 "bfm", false, Op.getStartLoc(), getContext());
   3975           else if (Tok == "sbfx")
   3976             Operands[0] = AArch64Operand::CreateToken(
   3977                 "sbfm", false, Op.getStartLoc(), getContext());
   3978           else if (Tok == "ubfx")
   3979             Operands[0] = AArch64Operand::CreateToken(
   3980                 "ubfm", false, Op.getStartLoc(), getContext());
   3981           else
   3982             llvm_unreachable("No valid mnemonic for alias?");
   3983         }
   3984       }
   3985     }
   3986   }
   3987   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
   3988   //        InstAlias can't quite handle this since the reg classes aren't
   3989   //        subclasses.
   3990   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
   3991     // The source register can be Wn here, but the matcher expects a
   3992     // GPR64. Twiddle it here if necessary.
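            // For example (illustrative), for "sxtw x0, w1" the W1 source is
            // rewritten to X1 below so that the GPR64-based pattern can match.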
   3993     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
   3994     if (Op.isReg()) {
   3995       unsigned Reg = getXRegFromWReg(Op.getReg());
   3996       Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
   3997                                               Op.getEndLoc(), getContext());
   3998     }
   3999   }
   4000   // FIXME: Likewise for sxt[bh] with a Xd dst operand
   4001   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
   4002     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
   4003     if (Op.isReg() &&
   4004         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   4005             Op.getReg())) {
   4006       // The source register can be Wn here, but the matcher expects a
   4007       // GPR64. Twiddle it here if necessary.
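              // Likewise (illustrative), "sxtb x0, w1" gets its W1 source
              // widened to X1 here.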
   4008       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
   4009       if (Op.isReg()) {
   4010         unsigned Reg = getXRegFromWReg(Op.getReg());
   4011         Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
   4012                                                 Op.getEndLoc(), getContext());
   4013       }
   4014     }
   4015   }
   4016   // FIXME: Likewise for uxt[bh] with a Xd dst operand
   4017   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
   4018     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
   4019     if (Op.isReg() &&
   4020         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   4021             Op.getReg())) {
   4022       // The destination register can be Xn here, but the matcher expects a
   4023       // GPR32. Twiddle it here if necessary.
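              // For example (illustrative), "uxtb x0, w1" has its X0 destination
              // narrowed to W0 here so that the GPR32-based pattern can match.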
   4024       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
   4025       if (Op.isReg()) {
   4026         unsigned Reg = getWRegFromXReg(Op.getReg());
   4027         Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
   4028                                                 Op.getEndLoc(), getContext());
   4029       }
   4030     }
   4031   }
   4032 
   4033   // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
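          // For example (illustrative), "fmov d0, #0.0" becomes "fmov d0, xzr"
          // and "fmov s0, #0.0" becomes "fmov s0, wzr": the zero immediate is
          // replaced by the zero register of the matching width.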
   4034   if (NumOperands == 3 && Tok == "fmov") {
   4035     AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
   4036     AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
   4037     if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
   4038       unsigned zreg =
   4039           !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
   4040               RegOp.getReg())
   4041               ? AArch64::WZR
   4042               : AArch64::XZR;
   4043       Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
   4044                                               Op.getEndLoc(), getContext());
   4045     }
   4046   }
   4047 
   4048   MCInst Inst;
   4049   // First try to match against the secondary set of tables containing the
   4050   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
   4051   unsigned MatchResult =
   4052       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
   4053 
   4054   // If that fails, try against the alternate table containing long-form NEON:
   4055   // "fadd v0.2s, v1.2s, v2.2s"
   4056   if (MatchResult != Match_Success) {
   4057     // But first, save the short-form match result: we can use it in case the
   4058     // long-form match also fails.
   4059     auto ShortFormNEONErrorInfo = ErrorInfo;
   4060     auto ShortFormNEONMatchResult = MatchResult;
   4061 
   4062     MatchResult =
   4063         MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
   4064 
   4065     // If both matches failed, and the long-form match failed on the mnemonic
   4066     // suffix token operand, the short-form match failure is probably more
   4067     // relevant: use it instead.
   4068     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
   4069         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
   4070         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
   4071       MatchResult = ShortFormNEONMatchResult;
   4072       ErrorInfo = ShortFormNEONErrorInfo;
   4073     }
   4074   }
   4075 
   4076 
   4077   switch (MatchResult) {
   4078   case Match_Success: {
   4079     // Perform range checking and other semantic validations
   4080     SmallVector<SMLoc, 8> OperandLocs;
   4081     NumOperands = Operands.size();
   4082     for (unsigned i = 1; i < NumOperands; ++i)
   4083       OperandLocs.push_back(Operands[i]->getStartLoc());
   4084     if (validateInstruction(Inst, OperandLocs))
   4085       return true;
   4086 
   4087     Inst.setLoc(IDLoc);
   4088     Out.EmitInstruction(Inst, getSTI());
   4089     return false;
   4090   }
   4091   case Match_MissingFeature: {
   4092     assert(ErrorInfo && "Unknown missing feature!");
   4093     // Special case the error message for the very common case where only
   4094     // a single subtarget feature is missing (neon, e.g.).
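            // For example (illustrative), assembling a NEON instruction while the
            // "neon" feature is disabled yields a diagnostic of the form
            // "instruction requires: neon".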
   4095     std::string Msg = "instruction requires:";
   4096     uint64_t Mask = 1;
   4097     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
   4098       if (ErrorInfo & Mask) {
   4099         Msg += " ";
   4100         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
   4101       }
   4102       Mask <<= 1;
   4103     }
   4104     return Error(IDLoc, Msg);
   4105   }
   4106   case Match_MnemonicFail:
   4107     return showMatchError(IDLoc, MatchResult);
   4108   case Match_InvalidOperand: {
   4109     SMLoc ErrorLoc = IDLoc;
   4110 
   4111     if (ErrorInfo != ~0ULL) {
   4112       if (ErrorInfo >= Operands.size())
   4113         return Error(IDLoc, "too few operands for instruction");
   4114 
   4115       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
   4116       if (ErrorLoc == SMLoc())
   4117         ErrorLoc = IDLoc;
   4118     }
   4119     // If the match failed on a suffix token operand, tweak the diagnostic
   4120     // accordingly.
   4121     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
   4122         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
   4123       MatchResult = Match_InvalidSuffix;
   4124 
   4125     return showMatchError(ErrorLoc, MatchResult);
   4126   }
   4127   case Match_InvalidMemoryIndexed1:
   4128   case Match_InvalidMemoryIndexed2:
   4129   case Match_InvalidMemoryIndexed4:
   4130   case Match_InvalidMemoryIndexed8:
   4131   case Match_InvalidMemoryIndexed16:
   4132   case Match_InvalidCondCode:
   4133   case Match_AddSubRegExtendSmall:
   4134   case Match_AddSubRegExtendLarge:
   4135   case Match_AddSubSecondSource:
   4136   case Match_LogicalSecondSource:
   4137   case Match_AddSubRegShift32:
   4138   case Match_AddSubRegShift64:
   4139   case Match_InvalidMovImm32Shift:
   4140   case Match_InvalidMovImm64Shift:
   4141   case Match_InvalidFPImm:
   4142   case Match_InvalidMemoryWExtend8:
   4143   case Match_InvalidMemoryWExtend16:
   4144   case Match_InvalidMemoryWExtend32:
   4145   case Match_InvalidMemoryWExtend64:
   4146   case Match_InvalidMemoryWExtend128:
   4147   case Match_InvalidMemoryXExtend8:
   4148   case Match_InvalidMemoryXExtend16:
   4149   case Match_InvalidMemoryXExtend32:
   4150   case Match_InvalidMemoryXExtend64:
   4151   case Match_InvalidMemoryXExtend128:
   4152   case Match_InvalidMemoryIndexed4SImm7:
   4153   case Match_InvalidMemoryIndexed8SImm7:
   4154   case Match_InvalidMemoryIndexed16SImm7:
   4155   case Match_InvalidMemoryIndexedSImm9:
   4156   case Match_InvalidImm0_1:
   4157   case Match_InvalidImm0_7:
   4158   case Match_InvalidImm0_15:
   4159   case Match_InvalidImm0_31:
   4160   case Match_InvalidImm0_63:
   4161   case Match_InvalidImm0_127:
   4162   case Match_InvalidImm0_65535:
   4163   case Match_InvalidImm1_8:
   4164   case Match_InvalidImm1_16:
   4165   case Match_InvalidImm1_32:
   4166   case Match_InvalidImm1_64:
   4167   case Match_InvalidIndex1:
   4168   case Match_InvalidIndexB:
   4169   case Match_InvalidIndexH:
   4170   case Match_InvalidIndexS:
   4171   case Match_InvalidIndexD:
   4172   case Match_InvalidLabel:
   4173   case Match_MSR:
   4174   case Match_MRS: {
   4175     if (ErrorInfo >= Operands.size())
   4176       return Error(IDLoc, "too few operands for instruction");
   4177     // Any time we get here, there's nothing fancy to do. Just get the
   4178     // operand SMLoc and display the diagnostic.
   4179     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
   4180     if (ErrorLoc == SMLoc())
   4181       ErrorLoc = IDLoc;
   4182     return showMatchError(ErrorLoc, MatchResult);
   4183   }
   4184   }
   4185 
   4186   llvm_unreachable("Implement any new match types added!");
   4187 }
   4188 
   4189 /// ParseDirective parses the AArch64-specific directives
   4190 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
   4191   const MCObjectFileInfo::Environment Format =
   4192     getContext().getObjectFileInfo()->getObjectFileType();
   4193   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
   4194   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
   4195 
   4196   StringRef IDVal = DirectiveID.getIdentifier();
   4197   SMLoc Loc = DirectiveID.getLoc();
   4198   if (IDVal == ".hword")
   4199     return parseDirectiveWord(2, Loc);
   4200   if (IDVal == ".word")
   4201     return parseDirectiveWord(4, Loc);
   4202   if (IDVal == ".xword")
   4203     return parseDirectiveWord(8, Loc);
   4204   if (IDVal == ".tlsdesccall")
   4205     return parseDirectiveTLSDescCall(Loc);
   4206   if (IDVal == ".ltorg" || IDVal == ".pool")
   4207     return parseDirectiveLtorg(Loc);
   4208   if (IDVal == ".unreq")
   4209     return parseDirectiveUnreq(Loc);
   4210 
   4211   if (!IsMachO && !IsCOFF) {
   4212     if (IDVal == ".inst")
   4213       return parseDirectiveInst(Loc);
   4214   }
   4215 
   4216   return parseDirectiveLOH(IDVal, Loc);
   4217 }
   4218 
   4219 /// parseDirectiveWord
   4220 ///  ::= .word [ expression (, expression)* ]
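        ///
        /// For example (illustrative): ".hword 0x1234" emits a 2-byte value,
        /// ".word 0xcafebabe" a 4-byte value, and ".xword sym" an 8-byte value,
        /// matching the Size chosen in ParseDirective above.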
   4221 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
   4222   MCAsmParser &Parser = getParser();
   4223   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   4224     for (;;) {
   4225       const MCExpr *Value;
   4226       if (getParser().parseExpression(Value))
   4227         return true;
   4228 
   4229       getParser().getStreamer().EmitValue(Value, Size, L);
   4230 
   4231       if (getLexer().is(AsmToken::EndOfStatement))
   4232         break;
   4233 
   4234       // FIXME: Improve diagnostic.
   4235       if (getLexer().isNot(AsmToken::Comma))
   4236         return Error(L, "unexpected token in directive");
   4237       Parser.Lex();
   4238     }
   4239   }
   4240 
   4241   Parser.Lex();
   4242   return false;
   4243 }
   4244 
   4245 /// parseDirectiveInst
   4246 ///  ::= .inst opcode [, ...]
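        ///
        /// For example (illustrative): ".inst 0xd503201f" emits that 32-bit word
        /// (the encoding of NOP) directly via the target streamer.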
   4247 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
   4248   MCAsmParser &Parser = getParser();
   4249   if (getLexer().is(AsmToken::EndOfStatement)) {
   4250     Parser.eatToEndOfStatement();
   4251     Error(Loc, "expected expression following directive");
   4252     return false;
   4253   }
   4254 
   4255   for (;;) {
   4256     const MCExpr *Expr;
   4257 
   4258     if (getParser().parseExpression(Expr)) {
   4259       Error(Loc, "expected expression");
   4260       return false;
   4261     }
   4262 
   4263     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
   4264     if (!Value) {
   4265       Error(Loc, "expected constant expression");
   4266       return false;
   4267     }
   4268 
   4269     getTargetStreamer().emitInst(Value->getValue());
   4270 
   4271     if (getLexer().is(AsmToken::EndOfStatement))
   4272       break;
   4273 
   4274     if (getLexer().isNot(AsmToken::Comma)) {
   4275       Error(Loc, "unexpected token in directive");
   4276       return false;
   4277     }
   4278 
   4279     Parser.Lex(); // Eat comma.
   4280   }
   4281 
   4282   Parser.Lex();
   4283   return false;
   4284 }
   4285 
   4286 // parseDirectiveTLSDescCall:
   4287 //   ::= .tlsdesccall symbol
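        //
        // For example (illustrative), ".tlsdesccall var" builds a TLSDESCCALL
        // pseudo-instruction whose operand is a VK_TLSDESC reference to "var",
        // as constructed below.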
   4288 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
   4289   StringRef Name;
   4290   if (getParser().parseIdentifier(Name))
   4291     return Error(L, "expected symbol after directive");
   4292 
   4293   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
   4294   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
   4295   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
   4296 
   4297   MCInst Inst;
   4298   Inst.setOpcode(AArch64::TLSDESCCALL);
   4299   Inst.addOperand(MCOperand::createExpr(Expr));
   4300 
   4301   getParser().getStreamer().EmitInstruction(Inst, getSTI());
   4302   return false;
   4303 }
   4304 
   4305 /// ::= .loh <lohName | lohId> label1, ..., labelN
   4306 /// The number of arguments depends on the loh identifier.
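        ///
        /// For example (illustrative): ".loh AdrpAdd Lfoo, Lbar" names a two-label
        /// Mach-O linker optimization hint; a numeric kind id is accepted in place
        /// of the name.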
   4307 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
   4308   if (IDVal != MCLOHDirectiveName())
   4309     return true;
   4310   MCLOHType Kind;
   4311   if (getParser().getTok().isNot(AsmToken::Identifier)) {
   4312     if (getParser().getTok().isNot(AsmToken::Integer))
   4313       return TokError("expected an identifier or a number in directive");
   4314     // We successfully got a numeric value for the identifier.
   4315     // Check if it is valid.
   4316     int64_t Id = getParser().getTok().getIntVal();
   4317     if (Id <= -1U && !isValidMCLOHType(Id))
   4318       return TokError("invalid numeric identifier in directive");
   4319     Kind = (MCLOHType)Id;
   4320   } else {
   4321     StringRef Name = getTok().getIdentifier();
   4322     // We successfully parsed an identifier.
   4323     // Check if it is a recognized one.
   4324     int Id = MCLOHNameToId(Name);
   4325 
   4326     if (Id == -1)
   4327       return TokError("invalid identifier in directive");
   4328     Kind = (MCLOHType)Id;
   4329   }
   4330   // Consume the identifier.
   4331   Lex();
   4332   // Get the number of arguments of this LOH.
   4333   int NbArgs = MCLOHIdToNbArgs(Kind);
   4334 
   4335   assert(NbArgs != -1 && "Invalid number of arguments");
   4336 
   4337   SmallVector<MCSymbol *, 3> Args;
   4338   for (int Idx = 0; Idx < NbArgs; ++Idx) {
   4339     StringRef Name;
   4340     if (getParser().parseIdentifier(Name))
   4341       return TokError("expected identifier in directive");
   4342     Args.push_back(getContext().getOrCreateSymbol(Name));
   4343 
   4344     if (Idx + 1 == NbArgs)
   4345       break;
   4346     if (getLexer().isNot(AsmToken::Comma))
   4347       return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
   4348     Lex();
   4349   }
   4350   if (getLexer().isNot(AsmToken::EndOfStatement))
   4351     return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
   4352 
   4353   getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
   4354   return false;
   4355 }
   4356 
   4357 /// parseDirectiveLtorg
   4358 ///  ::= .ltorg | .pool
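        ///
        /// For example (illustrative), an ".ltorg" placed after "ldr x0, =0x12345678"
        /// flushes any pending literal-pool entries at that point.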
   4359 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
   4360   getTargetStreamer().emitCurrentConstantPool();
   4361   return false;
   4362 }
   4363 
   4364 /// parseDirectiveReq
   4365 ///  ::= name .req registername
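        ///
        /// For example (illustrative): after "fpreg .req x29", later code may write
        /// "mov fpreg, sp". Vector aliases such as "vtmp .req v8" are accepted too,
        /// but only without a type suffix (e.g. not "v8.4s").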
   4366 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
   4367   MCAsmParser &Parser = getParser();
   4368   Parser.Lex(); // Eat the '.req' token.
   4369   SMLoc SRegLoc = getLoc();
   4370   unsigned RegNum = tryParseRegister();
   4371   bool IsVector = false;
   4372 
   4373   if (RegNum == static_cast<unsigned>(-1)) {
   4374     StringRef Kind;
   4375     RegNum = tryMatchVectorRegister(Kind, false);
   4376     if (!Kind.empty()) {
   4377       Error(SRegLoc, "vector register without type specifier expected");
   4378       return false;
   4379     }
   4380     IsVector = true;
   4381   }
   4382 
   4383   if (RegNum == static_cast<unsigned>(-1)) {
   4384     Parser.eatToEndOfStatement();
   4385     Error(SRegLoc, "register name or alias expected");
   4386     return false;
   4387   }
   4388 
   4389   // Shouldn't be anything else.
   4390   if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
   4391     Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
   4392     Parser.eatToEndOfStatement();
   4393     return false;
   4394   }
   4395 
   4396   Parser.Lex(); // Consume the EndOfStatement
   4397 
   4398   auto pair = std::make_pair(IsVector, RegNum);
   4399   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
   4400     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
   4401 
   4402   return true;
   4403 }
   4404 
   4405 /// parseDirectiveUnreq
   4406 ///  ::= .unreq registername
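        ///
        /// For example (illustrative), ".unreq fpreg" removes an alias created by a
        /// matching ".req"; unknown names are silently ignored.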
   4407 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
   4408   MCAsmParser &Parser = getParser();
   4409   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   4410     Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
   4411     Parser.eatToEndOfStatement();
   4412     return false;
   4413   }
   4414   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
   4415   Parser.Lex(); // Eat the identifier.
   4416   return false;
   4417 }
   4418 
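        /// Classify a symbolic operand expression: roughly speaking, split forms
        /// such as ":lo12:sym", "sym@PAGE" or "sym + 16" (illustrative examples)
        /// into an AArch64/ELF modifier kind, a Darwin variant kind, and a
        /// constant addend.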
   4419 bool
   4420 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
   4421                                     AArch64MCExpr::VariantKind &ELFRefKind,
   4422                                     MCSymbolRefExpr::VariantKind &DarwinRefKind,
   4423                                     int64_t &Addend) {
   4424   ELFRefKind = AArch64MCExpr::VK_INVALID;
   4425   DarwinRefKind = MCSymbolRefExpr::VK_None;
   4426   Addend = 0;
   4427 
   4428   if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
   4429     ELFRefKind = AE->getKind();
   4430     Expr = AE->getSubExpr();
   4431   }
   4432 
   4433   const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
   4434   if (SE) {
   4435     // It's a simple symbol reference with no addend.
   4436     DarwinRefKind = SE->getKind();
   4437     return true;
   4438   }
   4439 
   4440   const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
   4441   if (!BE)
   4442     return false;
   4443 
   4444   SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
   4445   if (!SE)
   4446     return false;
   4447   DarwinRefKind = SE->getKind();
   4448 
   4449   if (BE->getOpcode() != MCBinaryExpr::Add &&
   4450       BE->getOpcode() != MCBinaryExpr::Sub)
   4451     return false;
   4452 
   4453   // See if the addend is a constant, otherwise there's more going
   4454   // on here than we can deal with.
   4455   auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
   4456   if (!AddendExpr)
   4457     return false;
   4458 
   4459   Addend = AddendExpr->getValue();
   4460   if (BE->getOpcode() == MCBinaryExpr::Sub)
   4461     Addend = -Addend;
   4462 
   4463   // It's some symbol reference + a constant addend, but really
   4464   // shouldn't use both Darwin and ELF syntax.
   4465   return ELFRefKind == AArch64MCExpr::VK_INVALID ||
   4466          DarwinRefKind == MCSymbolRefExpr::VK_None;
   4467 }
   4468 
   4469 /// Force static initialization.
   4470 extern "C" void LLVMInitializeAArch64AsmParser() {
   4471   RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
   4472   RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
   4473   RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
   4474 }
   4475 
   4476 #define GET_REGISTER_MATCHER
   4477 #define GET_SUBTARGET_FEATURE_NAME
   4478 #define GET_MATCHER_IMPLEMENTATION
   4479 #include "AArch64GenAsmMatcher.inc"
   4480 
   4481 // Define this matcher function after the auto-generated include so we
   4482 // have the match class enum definitions.
   4483 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
   4484                                                       unsigned Kind) {
   4485   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
   4486   // If the kind is a token for a literal immediate, check if our asm
   4487   // operand matches. This is for InstAliases which have a fixed-value
   4488   // immediate in the syntax.
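          // (The autogenerated MCK__35_<N> kinds below stand for a literal "#<N>"
          // token in an alias's asm string; '#' is ASCII 35. So for an alias
          // written with a literal "#16", illustratively, Kind is MCK__35_16 and
          // the parsed immediate must equal 16.)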
   4489   int64_t ExpectedVal;
   4490   switch (Kind) {
   4491   default:
   4492     return Match_InvalidOperand;
   4493   case MCK__35_0:
   4494     ExpectedVal = 0;
   4495     break;
   4496   case MCK__35_1:
   4497     ExpectedVal = 1;
   4498     break;
   4499   case MCK__35_12:
   4500     ExpectedVal = 12;
   4501     break;
   4502   case MCK__35_16:
   4503     ExpectedVal = 16;
   4504     break;
   4505   case MCK__35_2:
   4506     ExpectedVal = 2;
   4507     break;
   4508   case MCK__35_24:
   4509     ExpectedVal = 24;
   4510     break;
   4511   case MCK__35_3:
   4512     ExpectedVal = 3;
   4513     break;
   4514   case MCK__35_32:
   4515     ExpectedVal = 32;
   4516     break;
   4517   case MCK__35_4:
   4518     ExpectedVal = 4;
   4519     break;
   4520   case MCK__35_48:
   4521     ExpectedVal = 48;
   4522     break;
   4523   case MCK__35_6:
   4524     ExpectedVal = 6;
   4525     break;
   4526   case MCK__35_64:
   4527     ExpectedVal = 64;
   4528     break;
   4529   case MCK__35_8:
   4530     ExpectedVal = 8;
   4531     break;
   4532   }
   4533   if (!Op.isImm())
   4534     return Match_InvalidOperand;
   4535   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
   4536   if (!CE)
   4537     return Match_InvalidOperand;
   4538   if (CE->getValue() == ExpectedVal)
   4539     return Match_Success;
   4540   return Match_InvalidOperand;
   4541 }
   4542 
   4543 
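        /// Try to parse a consecutive even/odd GPR pair such as "x0, x1" or "w4, w5"
        /// (illustrative operands, as used by the CASP instructions) and push the
        /// corresponding sequential-pair super-register as a single operand.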
   4544 AArch64AsmParser::OperandMatchResultTy
   4545 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
   4546 
   4547   SMLoc S = getLoc();
   4548 
   4549   if (getParser().getTok().isNot(AsmToken::Identifier)) {
   4550     Error(S, "expected register");
   4551     return MatchOperand_ParseFail;
   4552   }
   4553 
   4554   int FirstReg = tryParseRegister();
   4555   if (FirstReg == -1) {
   4556     return MatchOperand_ParseFail;
   4557   }
   4558   const MCRegisterClass &WRegClass =
   4559       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
   4560   const MCRegisterClass &XRegClass =
   4561       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
   4562 
   4563   bool isXReg = XRegClass.contains(FirstReg),
   4564        isWReg = WRegClass.contains(FirstReg);
   4565   if (!isXReg && !isWReg) {
   4566     Error(S, "expected first even register of a "
   4567              "consecutive same-size even/odd register pair");
   4568     return MatchOperand_ParseFail;
   4569   }
   4570 
   4571   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   4572   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
   4573 
   4574   if (FirstEncoding & 0x1) {
   4575     Error(S, "expected first even register of a "
   4576              "consecutive same-size even/odd register pair");
   4577     return MatchOperand_ParseFail;
   4578   }
   4579 
   4580   SMLoc M = getLoc();
   4581   if (getParser().getTok().isNot(AsmToken::Comma)) {
   4582     Error(M, "expected comma");
   4583     return MatchOperand_ParseFail;
   4584   }
   4585   // Eat the comma
   4586   getParser().Lex();
   4587 
   4588   SMLoc E = getLoc();
   4589   int SecondReg = tryParseRegister();
   4590   if (SecondReg == -1) {
   4591     return MatchOperand_ParseFail;
   4592   }
   4593 
   4594   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
   4595       (isXReg && !XRegClass.contains(SecondReg)) ||
   4596       (isWReg && !WRegClass.contains(SecondReg))) {
   4597     Error(E, "expected second odd register of a "
   4598              "consecutive same-size even/odd register pair");
   4599     return MatchOperand_ParseFail;
   4600   }
   4601 
   4602   unsigned Pair = 0;
   4603   if (isXReg) {
   4604     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
   4605            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
   4606   } else {
   4607     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
   4608            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
   4609   }
   4610 
   4611   Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
   4612       getContext()));
   4613 
   4614   return MatchOperand_Success;
   4615 }
   4616