      1 /* aarch64-opc.c -- AArch64 opcode support.
      2    Copyright (C) 2009-2016 Free Software Foundation, Inc.
      3    Contributed by ARM Ltd.
      4 
      5    This file is part of the GNU opcodes library.
      6 
      7    This library is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3, or (at your option)
     10    any later version.
     11 
     12    It is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program; see the file COPYING3. If not,
     19    see <http://www.gnu.org/licenses/>.  */
     20 
     21 #include "sysdep.h"
     22 #include <assert.h>
     23 #include <stdlib.h>
     24 #include <stdio.h>
     25 #include <stdint.h>
     26 #include <stdarg.h>
     27 #include <inttypes.h>
     28 
     29 #include "opintl.h"
     30 
     31 #include "aarch64-opc.h"
     32 
     33 #ifdef DEBUG_AARCH64
     34 int debug_dump = FALSE;
     35 #endif /* DEBUG_AARCH64 */
     36 
      37 /* Helper functions to determine which operand is to be used to encode/decode
      38    the size:Q fields for AdvSIMD instructions.  */
     39 
     40 static inline bfd_boolean
     41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
     42 {
     43   return ((qualifier >= AARCH64_OPND_QLF_V_8B
     44 	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
     45 	  : FALSE);
     46 }
     47 
     48 static inline bfd_boolean
     49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
     50 {
     51   return ((qualifier >= AARCH64_OPND_QLF_S_B
     52 	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
     53 	  : FALSE);
     54 }
     55 
     56 enum data_pattern
     57 {
     58   DP_UNKNOWN,
     59   DP_VECTOR_3SAME,
     60   DP_VECTOR_LONG,
     61   DP_VECTOR_WIDE,
     62   DP_VECTOR_ACROSS_LANES,
     63 };
     64 
     65 static const char significant_operand_index [] =
     66 {
     67   0,	/* DP_UNKNOWN, by default using operand 0.  */
     68   0,	/* DP_VECTOR_3SAME */
     69   1,	/* DP_VECTOR_LONG */
     70   2,	/* DP_VECTOR_WIDE */
     71   1,	/* DP_VECTOR_ACROSS_LANES */
     72 };
     73 
     74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
     75    the data pattern.
     76    N.B. QUALIFIERS is a possible sequence of qualifiers each of which
     77    corresponds to one of a sequence of operands.  */
     78 
     79 static enum data_pattern
     80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
     81 {
     82   if (vector_qualifier_p (qualifiers[0]) == TRUE)
     83     {
     84       /* e.g. v.4s, v.4s, v.4s
     85 	   or v.4h, v.4h, v.h[3].  */
     86       if (qualifiers[0] == qualifiers[1]
     87 	  && vector_qualifier_p (qualifiers[2]) == TRUE
     88 	  && (aarch64_get_qualifier_esize (qualifiers[0])
     89 	      == aarch64_get_qualifier_esize (qualifiers[1]))
     90 	  && (aarch64_get_qualifier_esize (qualifiers[0])
     91 	      == aarch64_get_qualifier_esize (qualifiers[2])))
     92 	return DP_VECTOR_3SAME;
     93       /* e.g. v.8h, v.8b, v.8b.
     94            or v.4s, v.4h, v.h[2].
     95 	   or v.8h, v.16b.  */
     96       if (vector_qualifier_p (qualifiers[1]) == TRUE
     97 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
     98 	  && (aarch64_get_qualifier_esize (qualifiers[0])
     99 	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
    100 	return DP_VECTOR_LONG;
    101       /* e.g. v.8h, v.8h, v.8b.  */
    102       if (qualifiers[0] == qualifiers[1]
    103 	  && vector_qualifier_p (qualifiers[2]) == TRUE
    104 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
    105 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    106 	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
    107 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    108 	      == aarch64_get_qualifier_esize (qualifiers[1])))
    109 	return DP_VECTOR_WIDE;
    110     }
    111   else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    112     {
    113       /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
    114       if (vector_qualifier_p (qualifiers[1]) == TRUE
    115 	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
    116 	return DP_VECTOR_ACROSS_LANES;
    117     }
    118 
    119   return DP_UNKNOWN;
    120 }
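
         /* Illustrative note (not part of the original source): for a widening
            instruction such as SADDL Vd.8H, Vn.8B, Vm.8B the qualifier sequence is
            (V_8H, V_8B, V_8B); the first qualifier is a vector qualifier whose
            element size (2) is twice that of the second (1), so get_data_pattern
            classifies it as DP_VECTOR_LONG and significant_operand_index selects
            operand 1 for the size:Q encoding.  */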
    121 
    122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
    123    the AdvSIMD instructions.  */
     124 /* N.B. it is possible to optimize this so that get_data_pattern is not
     125    called each time an operand needs to be selected; we could either cache
     126    the calculated result or generate the data statically.  However, it is
     127    not obvious that such an optimization would bring a significant
     128    benefit.  */
    129 
    130 int
    131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
    132 {
    133   return
    134     significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
    135 }
    136 
    137 const aarch64_field fields[] =
    139 {
    140     {  0,  0 },	/* NIL.  */
     141     {  0,  4 },	/* cond2: condition in truly conditionally executed inst.  */
    142     {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    143     {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    144     { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    145     {  5, 19 },	/* imm19: e.g. in CBZ.  */
    146     {  5, 19 },	/* immhi: e.g. in ADRP.  */
    147     { 29,  2 },	/* immlo: e.g. in ADRP.  */
    148     { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    149     { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    150     { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    151     { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    152     {  0,  5 },	/* Rt: in load/store instructions.  */
    153     {  0,  5 },	/* Rd: in many integer instructions.  */
    154     {  5,  5 },	/* Rn: in many integer instructions.  */
    155     { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    156     { 10,  5 },	/* Ra: in fp instructions.  */
    157     {  5,  3 },	/* op2: in the system instructions.  */
    158     {  8,  4 },	/* CRm: in the system instructions.  */
    159     { 12,  4 },	/* CRn: in the system instructions.  */
    160     { 16,  3 },	/* op1: in the system instructions.  */
    161     { 19,  2 },	/* op0: in the system instructions.  */
    162     { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    163     { 12,  4 },	/* cond: condition flags as a source operand.  */
    164     { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    165     { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    166     { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    167     { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    168     { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    169     { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    170     { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    171     { 12,  1 },	/* S: in load/store reg offset instructions.  */
    172     { 21,  2 },	/* hw: in move wide constant instructions.  */
    173     { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    174     { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    175     { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    176     { 22,  2 },	/* type: floating point type field in fp data inst.  */
    177     { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    178     { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    179     { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    180     { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    181     { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    182     { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    183     { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    184     { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    185     {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    186     {  5, 16 },	/* imm16: in exception instructions.  */
    187     {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    188     { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    189     { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    190     { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    191     { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    192     { 22,  1 },	/* N: in logical (immediate) instructions.  */
    193     { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    194     { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    195     { 31,  1 },	/* sf: in integer data processing instructions.  */
    196     { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    197     { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    198     { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    199     { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    200     { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    201     { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    202     { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    203 };
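
         /* Illustrative note (not part of the original source): each entry above
            gives a field's least significant bit position and its width.  For
            example, the imm19 entry { 5, 19 } describes insn bits [23:5];
            extracting the field amounts to (code >> 5) & ((1u << 19) - 1), and
            inserting a value V amounts to code |= (V & ((1u << 19) - 1)) << 5.  */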
    204 
    205 enum aarch64_operand_class
    206 aarch64_get_operand_class (enum aarch64_opnd type)
    207 {
    208   return aarch64_operands[type].op_class;
    209 }
    210 
    211 const char *
    212 aarch64_get_operand_name (enum aarch64_opnd type)
    213 {
    214   return aarch64_operands[type].name;
    215 }
    216 
    217 /* Get operand description string.
     218    This is usually used for diagnostic purposes.  */
    219 const char *
    220 aarch64_get_operand_desc (enum aarch64_opnd type)
    221 {
    222   return aarch64_operands[type].desc;
    223 }
    224 
    225 /* Table of all conditional affixes.  */
    226 const aarch64_cond aarch64_conds[16] =
    227 {
    228   {{"eq"}, 0x0},
    229   {{"ne"}, 0x1},
    230   {{"cs", "hs"}, 0x2},
    231   {{"cc", "lo", "ul"}, 0x3},
    232   {{"mi"}, 0x4},
    233   {{"pl"}, 0x5},
    234   {{"vs"}, 0x6},
    235   {{"vc"}, 0x7},
    236   {{"hi"}, 0x8},
    237   {{"ls"}, 0x9},
    238   {{"ge"}, 0xa},
    239   {{"lt"}, 0xb},
    240   {{"gt"}, 0xc},
    241   {{"le"}, 0xd},
    242   {{"al"}, 0xe},
    243   {{"nv"}, 0xf},
    244 };
    245 
    246 const aarch64_cond *
    247 get_cond_from_value (aarch64_insn value)
    248 {
    249   assert (value < 16);
    250   return &aarch64_conds[(unsigned int) value];
    251 }
    252 
    253 const aarch64_cond *
    254 get_inverted_cond (const aarch64_cond *cond)
    255 {
    256   return &aarch64_conds[cond->value ^ 0x1];
    257 }
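
         /* Illustrative note (not part of the original source): inverting a
            condition simply flips the lowest bit of its value, so, using the table
            above, get_inverted_cond maps "eq" (0x0) to "ne" (0x1) and
            "cs"/"hs" (0x2) to "cc"/"lo" (0x3).  */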
    258 
    259 /* Table describing the operand extension/shifting operators; indexed by
    260    enum aarch64_modifier_kind.
    261 
    262    The value column provides the most common values for encoding modifiers,
    263    which enables table-driven encoding/decoding for the modifiers.  */
    264 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
    265 {
    266     {"none", 0x0},
    267     {"msl",  0x0},
    268     {"ror",  0x3},
    269     {"asr",  0x2},
    270     {"lsr",  0x1},
    271     {"lsl",  0x0},
    272     {"uxtb", 0x0},
    273     {"uxth", 0x1},
    274     {"uxtw", 0x2},
    275     {"uxtx", 0x3},
    276     {"sxtb", 0x4},
    277     {"sxth", 0x5},
    278     {"sxtw", 0x6},
    279     {"sxtx", 0x7},
    280     {NULL, 0},
    281 };
    282 
    283 enum aarch64_modifier_kind
    284 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
    285 {
    286   return desc - aarch64_operand_modifiers;
    287 }
    288 
    289 aarch64_insn
    290 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
    291 {
    292   return aarch64_operand_modifiers[kind].value;
    293 }
    294 
    295 enum aarch64_modifier_kind
    296 aarch64_get_operand_modifier_from_value (aarch64_insn value,
    297 					 bfd_boolean extend_p)
    298 {
    299   if (extend_p == TRUE)
    300     return AARCH64_MOD_UXTB + value;
    301   else
    302     return AARCH64_MOD_LSL - value;
    303 }
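
         /* Illustrative note (not part of the original source): with the encoding
            values listed in aarch64_operand_modifiers above, a decoded extend
            field value of 2 with EXTEND_P == TRUE maps to AARCH64_MOD_UXTW
            (AARCH64_MOD_UXTB + 2), while a shift field value of 3 with
            EXTEND_P == FALSE maps to AARCH64_MOD_ROR (AARCH64_MOD_LSL - 3).  */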
    304 
    305 bfd_boolean
    306 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
    307 {
    308   return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
    309     ? TRUE : FALSE;
    310 }
    311 
    312 static inline bfd_boolean
    313 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
    314 {
    315   return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
    316     ? TRUE : FALSE;
    317 }
    318 
    319 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
    320 {
    321     { "#0x00", 0x0 },
    322     { "oshld", 0x1 },
    323     { "oshst", 0x2 },
    324     { "osh",   0x3 },
    325     { "#0x04", 0x4 },
    326     { "nshld", 0x5 },
    327     { "nshst", 0x6 },
    328     { "nsh",   0x7 },
    329     { "#0x08", 0x8 },
    330     { "ishld", 0x9 },
    331     { "ishst", 0xa },
    332     { "ish",   0xb },
    333     { "#0x0c", 0xc },
    334     { "ld",    0xd },
    335     { "st",    0xe },
    336     { "sy",    0xf },
    337 };
    338 
    339 /* Table describing the operands supported by the aliases of the HINT
    340    instruction.
    341 
    342    The name column is the operand that is accepted for the alias.  The value
    343    column is the hint number of the alias.  The list of operands is terminated
    344    by NULL in the name column.  */
    345 
    346 const struct aarch64_name_value_pair aarch64_hint_options[] =
    347 {
    348   { "csync", 0x11 },    /* PSB CSYNC.  */
    349   { NULL, 0x0 },
    350 };
    351 
    352 /* op -> op:       load = 0 instruction = 1 store = 2
    353    l  -> level:    1-3
    354    t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
    355 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
    356 const struct aarch64_name_value_pair aarch64_prfops[32] =
    357 {
    358   { "pldl1keep", B(0, 1, 0) },
    359   { "pldl1strm", B(0, 1, 1) },
    360   { "pldl2keep", B(0, 2, 0) },
    361   { "pldl2strm", B(0, 2, 1) },
    362   { "pldl3keep", B(0, 3, 0) },
    363   { "pldl3strm", B(0, 3, 1) },
    364   { NULL, 0x06 },
    365   { NULL, 0x07 },
    366   { "plil1keep", B(1, 1, 0) },
    367   { "plil1strm", B(1, 1, 1) },
    368   { "plil2keep", B(1, 2, 0) },
    369   { "plil2strm", B(1, 2, 1) },
    370   { "plil3keep", B(1, 3, 0) },
    371   { "plil3strm", B(1, 3, 1) },
    372   { NULL, 0x0e },
    373   { NULL, 0x0f },
    374   { "pstl1keep", B(2, 1, 0) },
    375   { "pstl1strm", B(2, 1, 1) },
    376   { "pstl2keep", B(2, 2, 0) },
    377   { "pstl2strm", B(2, 2, 1) },
    378   { "pstl3keep", B(2, 3, 0) },
    379   { "pstl3strm", B(2, 3, 1) },
    380   { NULL, 0x16 },
    381   { NULL, 0x17 },
    382   { NULL, 0x18 },
    383   { NULL, 0x19 },
    384   { NULL, 0x1a },
    385   { NULL, 0x1b },
    386   { NULL, 0x1c },
    387   { NULL, 0x1d },
    388   { NULL, 0x1e },
    389   { NULL, 0x1f },
    390 };
    391 #undef B
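
         /* Illustrative note (not part of the original source): the prfop value
            packs op:level:temporal as (op << 3) | ((l - 1) << 1) | t, so
            "pldl1keep" is (0 << 3) | (0 << 1) | 0 = 0x00 and "pstl3strm" is
            (2 << 3) | (2 << 1) | 1 = 0x15, matching their positions in the table
            above.  */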
    392 
     393 /* Utilities for value constraint checking.  */
    395 
    396 static inline int
    397 value_in_range_p (int64_t value, int low, int high)
    398 {
    399   return (value >= low && value <= high) ? 1 : 0;
    400 }
    401 
    402 static inline int
    403 value_aligned_p (int64_t value, int align)
    404 {
    405   return ((value & (align - 1)) == 0) ? 1 : 0;
    406 }
    407 
     408 /* Return 1 if the signed VALUE fits in a field that is WIDTH bits wide.  */
    409 static inline int
    410 value_fit_signed_field_p (int64_t value, unsigned width)
    411 {
    412   assert (width < 32);
    413   if (width < sizeof (value) * 8)
    414     {
    415       int64_t lim = (int64_t)1 << (width - 1);
    416       if (value >= -lim && value < lim)
    417 	return 1;
    418     }
    419   return 0;
    420 }
    421 
     422 /* Return 1 if the unsigned VALUE fits in a field that is WIDTH bits wide.  */
    423 static inline int
    424 value_fit_unsigned_field_p (int64_t value, unsigned width)
    425 {
    426   assert (width < 32);
    427   if (width < sizeof (value) * 8)
    428     {
    429       int64_t lim = (int64_t)1 << width;
    430       if (value >= 0 && value < lim)
    431 	return 1;
    432     }
    433   return 0;
    434 }
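
         /* Illustrative note (not part of the original source): for a 19-bit field,
            value_fit_signed_field_p accepts -262144..262143 (-2^18..2^18-1) and
            value_fit_unsigned_field_p accepts 0..524287 (0..2^19-1); for instance
            value_fit_signed_field_p (262144, 19) returns 0.  */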
    435 
    436 /* Return 1 if OPERAND is SP or WSP.  */
    437 int
    438 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
    439 {
    440   return ((aarch64_get_operand_class (operand->type)
    441 	   == AARCH64_OPND_CLASS_INT_REG)
    442 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
    443 	  && operand->reg.regno == 31);
    444 }
    445 
     446 /* Return 1 if OPERAND is XZR or WZR.  */
    447 int
    448 aarch64_zero_register_p (const aarch64_opnd_info *operand)
    449 {
    450   return ((aarch64_get_operand_class (operand->type)
    451 	   == AARCH64_OPND_CLASS_INT_REG)
    452 	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
    453 	  && operand->reg.regno == 31);
    454 }
    455 
     456 /* Return true if the operand *OPERAND, which has the operand code
     457    OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
     458    qualified by the qualifier TARGET.  */
    459 
    460 static inline int
    461 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
    462 			  aarch64_opnd_qualifier_t target)
    463 {
    464   switch (operand->qualifier)
    465     {
    466     case AARCH64_OPND_QLF_W:
    467       if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
    468 	return 1;
    469       break;
    470     case AARCH64_OPND_QLF_X:
    471       if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
    472 	return 1;
    473       break;
    474     case AARCH64_OPND_QLF_WSP:
    475       if (target == AARCH64_OPND_QLF_W
    476 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    477 	return 1;
    478       break;
    479     case AARCH64_OPND_QLF_SP:
    480       if (target == AARCH64_OPND_QLF_X
    481 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    482 	return 1;
    483       break;
    484     default:
    485       break;
    486     }
    487 
    488   return 0;
    489 }
    490 
    491 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
    492    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
    493 
     494    Return NIL if more than one expected qualifier is found.  */
    495 
    496 aarch64_opnd_qualifier_t
    497 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
    498 				int idx,
    499 				const aarch64_opnd_qualifier_t known_qlf,
    500 				int known_idx)
    501 {
    502   int i, saved_i;
    503 
    504   /* Special case.
    505 
    506      When the known qualifier is NIL, we have to assume that there is only
    507      one qualifier sequence in the *QSEQ_LIST and return the corresponding
    508      qualifier directly.  One scenario is that for instruction
    509 	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
    510      which has only one possible valid qualifier sequence
    511 	NIL, S_D
    512      the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
    513      determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
    514 
     515      Because the qualifier NIL has dual roles in a qualifier sequence:
     516      it can mean either that the operand has no qualifier, or that the
     517      qualifier sequence is not in use (when all qualifiers in the sequence
     518      are NILs), we have to handle this special case here.  */
    519   if (known_qlf == AARCH64_OPND_NIL)
    520     {
    521       assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
    522       return qseq_list[0][idx];
    523     }
    524 
    525   for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    526     {
    527       if (qseq_list[i][known_idx] == known_qlf)
    528 	{
    529 	  if (saved_i != -1)
     530 	    /* More than one sequence has KNOWN_QLF at
     531 	       KNOWN_IDX.  */
    532 	    return AARCH64_OPND_NIL;
    533 	  saved_i = i;
    534 	}
    535     }
    536 
    537   return qseq_list[saved_i][idx];
    538 }
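
         /* Illustrative note (not part of the original source): if QSEQ_LIST for
            some opcode contained the two sequences (W, W) and (X, X), then passing
            KNOWN_QLF == AARCH64_OPND_QLF_X for KNOWN_IDX == 0 would uniquely select
            the second sequence and the function would return AARCH64_OPND_QLF_X
            for IDX == 1; if both sequences had X at KNOWN_IDX, NIL would be
            returned instead.  */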
    539 
    540 enum operand_qualifier_kind
    541 {
    542   OQK_NIL,
    543   OQK_OPD_VARIANT,
    544   OQK_VALUE_IN_RANGE,
    545   OQK_MISC,
    546 };
    547 
    548 /* Operand qualifier description.  */
    549 struct operand_qualifier_data
    550 {
    551   /* The usage of the three data fields depends on the qualifier kind.  */
    552   int data0;
    553   int data1;
    554   int data2;
    555   /* Description.  */
    556   const char *desc;
    557   /* Kind.  */
    558   enum operand_qualifier_kind kind;
    559 };
    560 
    561 /* Indexed by the operand qualifier enumerators.  */
    562 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
    563 {
    564   {0, 0, 0, "NIL", OQK_NIL},
    565 
    566   /* Operand variant qualifiers.
    567      First 3 fields:
    568      element size, number of elements and common value for encoding.  */
    569 
    570   {4, 1, 0x0, "w", OQK_OPD_VARIANT},
    571   {8, 1, 0x1, "x", OQK_OPD_VARIANT},
    572   {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
    573   {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
    574 
    575   {1, 1, 0x0, "b", OQK_OPD_VARIANT},
    576   {2, 1, 0x1, "h", OQK_OPD_VARIANT},
    577   {4, 1, 0x2, "s", OQK_OPD_VARIANT},
    578   {8, 1, 0x3, "d", OQK_OPD_VARIANT},
    579   {16, 1, 0x4, "q", OQK_OPD_VARIANT},
    580 
    581   {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
    582   {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
    583   {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
    584   {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
    585   {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
    586   {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
    587   {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
    588   {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
    589   {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
    590   {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
    591 
    592   /* Qualifiers constraining the value range.
    593      First 3 fields:
    594      Lower bound, higher bound, unused.  */
    595 
    596   {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
    597   {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
    598   {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
    599   {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
    600   {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
    601   {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
    602 
     603   /* Qualifiers for miscellaneous purposes.
    604      First 3 fields:
    605      unused, unused and unused.  */
    606 
    607   {0, 0, 0, "lsl", 0},
    608   {0, 0, 0, "msl", 0},
    609 
    610   {0, 0, 0, "retrieving", 0},
    611 };
    612 
    613 static inline bfd_boolean
    614 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
    615 {
    616   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
    617     ? TRUE : FALSE;
    618 }
    619 
    620 static inline bfd_boolean
    621 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
    622 {
    623   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
    624     ? TRUE : FALSE;
    625 }
    626 
    627 const char*
    628 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
    629 {
    630   return aarch64_opnd_qualifiers[qualifier].desc;
    631 }
    632 
    633 /* Given an operand qualifier, return the expected data element size
    634    of a qualified operand.  */
    635 unsigned char
    636 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
    637 {
    638   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    639   return aarch64_opnd_qualifiers[qualifier].data0;
    640 }
    641 
    642 unsigned char
    643 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
    644 {
    645   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    646   return aarch64_opnd_qualifiers[qualifier].data1;
    647 }
    648 
    649 aarch64_insn
    650 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
    651 {
    652   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    653   return aarch64_opnd_qualifiers[qualifier].data2;
    654 }
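
         /* Illustrative note (not part of the original source): for the qualifier
            AARCH64_OPND_QLF_V_4S the table above records an element size of 4
            bytes, 4 elements and a standard encoding value of 0x5, so the three
            accessors above return 4, 4 and 0x5 respectively.  */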
    655 
    656 static int
    657 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
    658 {
    659   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
    660   return aarch64_opnd_qualifiers[qualifier].data0;
    661 }
    662 
    663 static int
    664 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
    665 {
    666   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
    667   return aarch64_opnd_qualifiers[qualifier].data1;
    668 }
    669 
    670 #ifdef DEBUG_AARCH64
    671 void
    672 aarch64_verbose (const char *str, ...)
    673 {
    674   va_list ap;
    675   va_start (ap, str);
    676   printf ("#### ");
    677   vprintf (str, ap);
    678   printf ("\n");
    679   va_end (ap);
    680 }
    681 
    682 static inline void
    683 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
    684 {
    685   int i;
    686   printf ("#### \t");
    687   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    688     printf ("%s,", aarch64_get_qualifier_name (*qualifier));
    689   printf ("\n");
    690 }
    691 
    692 static void
    693 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
    694 		       const aarch64_opnd_qualifier_t *qualifier)
    695 {
    696   int i;
    697   aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
    698 
    699   aarch64_verbose ("dump_match_qualifiers:");
    700   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    701     curr[i] = opnd[i].qualifier;
    702   dump_qualifier_sequence (curr);
    703   aarch64_verbose ("against");
    704   dump_qualifier_sequence (qualifier);
    705 }
    706 #endif /* DEBUG_AARCH64 */
    707 
     708 /* TODO: improve this; we could store the number of operands in an extra
     709    field at run time rather than calculating it every time.  */
    710 
    711 int
    712 aarch64_num_of_operands (const aarch64_opcode *opcode)
    713 {
    714   int i = 0;
    715   const enum aarch64_opnd *opnds = opcode->operands;
    716   while (opnds[i++] != AARCH64_OPND_NIL)
    717     ;
    718   --i;
    719   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
    720   return i;
    721 }
    722 
    723 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
     724    If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
    725 
     726    N.B. on entry, it is very likely that only some operands in *INST
     727    have had their qualifiers established.
    728 
    729    If STOP_AT is not -1, the function will only try to match
    730    the qualifier sequence for operands before and including the operand
    731    of index STOP_AT; and on success *RET will only be filled with the first
    732    (STOP_AT+1) qualifiers.
    733 
     734    A couple of examples of the matching algorithm:
    735 
    736    X,W,NIL should match
    737    X,W,NIL
    738 
    739    NIL,NIL should match
    740    X  ,NIL
    741 
    742    Apart from serving the main encoding routine, this can also be called
    743    during or after the operand decoding.  */
    744 
    745 int
    746 aarch64_find_best_match (const aarch64_inst *inst,
    747 			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
    748 			 int stop_at, aarch64_opnd_qualifier_t *ret)
    749 {
    750   int found = 0;
    751   int i, num_opnds;
    752   const aarch64_opnd_qualifier_t *qualifiers;
    753 
    754   num_opnds = aarch64_num_of_operands (inst->opcode);
    755   if (num_opnds == 0)
    756     {
    757       DEBUG_TRACE ("SUCCEED: no operand");
    758       return 1;
    759     }
    760 
    761   if (stop_at < 0 || stop_at >= num_opnds)
    762     stop_at = num_opnds - 1;
    763 
    764   /* For each pattern.  */
    765   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    766     {
    767       int j;
    768       qualifiers = *qualifiers_list;
    769 
    770       /* Start as positive.  */
    771       found = 1;
    772 
    773       DEBUG_TRACE ("%d", i);
    774 #ifdef DEBUG_AARCH64
    775       if (debug_dump)
    776 	dump_match_qualifiers (inst->operands, qualifiers);
    777 #endif
    778 
     779       /* Most opcodes have far fewer patterns in the list.
     780 	 The first NIL qualifier indicates the end of the list.  */
    781       if (empty_qualifier_sequence_p (qualifiers) == TRUE)
    782 	{
    783 	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
    784 	  if (i)
    785 	    found = 0;
    786 	  break;
    787 	}
    788 
    789       for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
    790 	{
    791 	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
    792 	    {
     793 	      /* Either the operand does not have a qualifier, or the qualifier
     794 		 for the operand needs to be deduced from the qualifier
     795 		 sequence.
     796 		 In the latter case, any constraint checking related to
     797 		 the obtained qualifier should be done later in
     798 		 operand_general_constraint_met_p.  */
    799 	      continue;
    800 	    }
    801 	  else if (*qualifiers != inst->operands[j].qualifier)
    802 	    {
     803 	      /* Unless the target qualifier can also qualify the operand
     804 		 (which already has a non-nil qualifier), unequal
     805 		 qualifiers generally do not match.  */
    806 	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
    807 		continue;
    808 	      else
    809 		{
    810 		  found = 0;
    811 		  break;
    812 		}
    813 	    }
    814 	  else
    815 	    continue;	/* Equal qualifiers are certainly matched.  */
    816 	}
    817 
    818       /* Qualifiers established.  */
    819       if (found == 1)
    820 	break;
    821     }
    822 
    823   if (found == 1)
    824     {
    825       /* Fill the result in *RET.  */
    826       int j;
    827       qualifiers = *qualifiers_list;
    828 
    829       DEBUG_TRACE ("complete qualifiers using list %d", i);
    830 #ifdef DEBUG_AARCH64
    831       if (debug_dump)
    832 	dump_qualifier_sequence (qualifiers);
    833 #endif
    834 
    835       for (j = 0; j <= stop_at; ++j, ++qualifiers)
    836 	ret[j] = *qualifiers;
    837       for (; j < AARCH64_MAX_OPND_NUM; ++j)
    838 	ret[j] = AARCH64_OPND_QLF_NIL;
    839 
    840       DEBUG_TRACE ("SUCCESS");
    841       return 1;
    842     }
    843 
    844   DEBUG_TRACE ("FAIL");
    845   return 0;
    846 }
    847 
    848 /* Operand qualifier matching and resolving.
    849 
    850    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
    851    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
    852 
     853    If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
    854    succeeds.  */
    855 
    856 static int
    857 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
    858 {
    859   int i;
    860   aarch64_opnd_qualifier_seq_t qualifiers;
    861 
    862   if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
    863 			       qualifiers))
    864     {
    865       DEBUG_TRACE ("matching FAIL");
    866       return 0;
    867     }
    868 
    869   /* Update the qualifiers.  */
    870   if (update_p == TRUE)
    871     for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    872       {
    873 	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
    874 	  break;
    875 	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
    876 			"update %s with %s for operand %d",
    877 			aarch64_get_qualifier_name (inst->operands[i].qualifier),
    878 			aarch64_get_qualifier_name (qualifiers[i]), i);
    879 	inst->operands[i].qualifier = qualifiers[i];
    880       }
    881 
    882   DEBUG_TRACE ("matching SUCCESS");
    883   return 1;
    884 }
    885 
    886 /* Return TRUE if VALUE is a wide constant that can be moved into a general
    887    register by MOVZ.
    888 
    889    IS32 indicates whether value is a 32-bit immediate or not.
    890    If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
    891    amount will be returned in *SHIFT_AMOUNT.  */
    892 
    893 bfd_boolean
    894 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
    895 {
    896   int amount;
    897 
    898   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
    899 
    900   if (is32)
    901     {
    902       /* Allow all zeros or all ones in top 32-bits, so that
    903 	 32-bit constant expressions like ~0x80000000 are
    904 	 permitted.  */
    905       uint64_t ext = value;
    906       if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
    907 	/* Immediate out of range.  */
    908 	return FALSE;
    909       value &= (int64_t) 0xffffffff;
    910     }
    911 
    912   /* first, try movz then movn */
    913   amount = -1;
    914   if ((value & ((int64_t) 0xffff << 0)) == value)
    915     amount = 0;
    916   else if ((value & ((int64_t) 0xffff << 16)) == value)
    917     amount = 16;
    918   else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    919     amount = 32;
    920   else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    921     amount = 48;
    922 
    923   if (amount == -1)
    924     {
    925       DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
    926       return FALSE;
    927     }
    928 
    929   if (shift_amount != NULL)
    930     *shift_amount = amount;
    931 
    932   DEBUG_TRACE ("exit TRUE with amount %d", amount);
    933 
    934   return TRUE;
    935 }
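
         /* Illustrative note (not part of the original source): with IS32 == 0,
            aarch64_wide_constant_p (0x0000ffff00000000, 0, &amount) returns TRUE
            and sets *SHIFT_AMOUNT to 32, i.e. the constant can be materialized as
            MOVZ Xd, #0xffff, LSL #32; a value such as 0x10001, which spans two
            halfwords, makes the function return FALSE.  */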
    936 
    937 /* Build the accepted values for immediate logical SIMD instructions.
    938 
    939    The standard encodings of the immediate value are:
    940      N      imms     immr         SIMD size  R             S
    941      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
    942      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
    943      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
    944      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
    945      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
    946      0      11110s   00000r       2       UInt(r)       UInt(s)
    947    where all-ones value of S is reserved.
    948 
    949    Let's call E the SIMD size.
    950 
    951    The immediate value is: S+1 bits '1' rotated to the right by R.
    952 
    953    The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
    954    (remember S != E - 1).  */
    955 
    956 #define TOTAL_IMM_NB  5334
    957 
    958 typedef struct
    959 {
    960   uint64_t imm;
    961   aarch64_insn encoding;
    962 } simd_imm_encoding;
    963 
    964 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
    965 
    966 static int
    967 simd_imm_encoding_cmp(const void *i1, const void *i2)
    968 {
    969   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
    970   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
    971 
    972   if (imm1->imm < imm2->imm)
    973     return -1;
    974   if (imm1->imm > imm2->imm)
    975     return +1;
    976   return 0;
    977 }
    978 
    979 /* immediate bitfield standard encoding
    980    imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
    981    1         ssssss     rrrrrr      64        rrrrrr ssssss
    982    0         0sssss     0rrrrr      32        rrrrr  sssss
    983    0         10ssss     00rrrr      16        rrrr   ssss
    984    0         110sss     000rrr      8         rrr    sss
    985    0         1110ss     0000rr      4         rr     ss
    986    0         11110s     00000r      2         r      s  */
    987 static inline int
    988 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
    989 {
    990   return (is64 << 12) | (r << 6) | s;
    991 }
    992 
    993 static void
    994 build_immediate_table (void)
    995 {
    996   uint32_t log_e, e, s, r, s_mask;
    997   uint64_t mask, imm;
    998   int nb_imms;
    999   int is64;
   1000 
   1001   nb_imms = 0;
   1002   for (log_e = 1; log_e <= 6; log_e++)
   1003     {
   1004       /* Get element size.  */
   1005       e = 1u << log_e;
   1006       if (log_e == 6)
   1007 	{
   1008 	  is64 = 1;
   1009 	  mask = 0xffffffffffffffffull;
   1010 	  s_mask = 0;
   1011 	}
   1012       else
   1013 	{
   1014 	  is64 = 0;
   1015 	  mask = (1ull << e) - 1;
   1016 	  /* log_e  s_mask
   1017 	     1     ((1 << 4) - 1) << 2 = 111100
   1018 	     2     ((1 << 3) - 1) << 3 = 111000
   1019 	     3     ((1 << 2) - 1) << 4 = 110000
   1020 	     4     ((1 << 1) - 1) << 5 = 100000
   1021 	     5     ((1 << 0) - 1) << 6 = 000000  */
   1022 	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
   1023 	}
   1024       for (s = 0; s < e - 1; s++)
   1025 	for (r = 0; r < e; r++)
   1026 	  {
   1027 	    /* s+1 consecutive bits to 1 (s < 63) */
   1028 	    imm = (1ull << (s + 1)) - 1;
   1029 	    /* rotate right by r */
   1030 	    if (r != 0)
   1031 	      imm = (imm >> r) | ((imm << (e - r)) & mask);
    1032 	    /* Replicate the constant depending on SIMD size; the cases below fall through on purpose.  */
   1033 	    switch (log_e)
   1034 	      {
   1035 	      case 1: imm = (imm <<  2) | imm;
   1036 	      case 2: imm = (imm <<  4) | imm;
   1037 	      case 3: imm = (imm <<  8) | imm;
   1038 	      case 4: imm = (imm << 16) | imm;
   1039 	      case 5: imm = (imm << 32) | imm;
   1040 	      case 6: break;
   1041 	      default: abort ();
   1042 	      }
   1043 	    simd_immediates[nb_imms].imm = imm;
   1044 	    simd_immediates[nb_imms].encoding =
   1045 	      encode_immediate_bitfield(is64, s | s_mask, r);
   1046 	    nb_imms++;
   1047 	  }
   1048     }
   1049   assert (nb_imms == TOTAL_IMM_NB);
   1050   qsort(simd_immediates, nb_imms,
   1051 	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
   1052 }
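
         /* Illustrative note (not part of the original source): as an example of a
            table entry, take element size E == 8, S == 2, R == 1: the element is
            S+1 == 3 set bits (00000111) rotated right by one, i.e. 10000011, which
            is replicated to the 64-bit value 0x8383838383838383; its encoding is
            N == 0, immr == 000001, imms == 110010, matching the "0 110sss 000rrr"
            row of the table above.  */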
   1053 
   1054 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   1055    be accepted by logical (immediate) instructions
   1056    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
   1057 
   1058    IS32 indicates whether or not VALUE is a 32-bit immediate.
   1059    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   1060    VALUE will be returned in *ENCODING.  */
   1061 
   1062 bfd_boolean
   1063 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
   1064 {
   1065   simd_imm_encoding imm_enc;
   1066   const simd_imm_encoding *imm_encoding;
   1067   static bfd_boolean initialized = FALSE;
   1068 
   1069   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
   1070 	       value, is32);
   1071 
   1072   if (initialized == FALSE)
   1073     {
   1074       build_immediate_table ();
   1075       initialized = TRUE;
   1076     }
   1077 
   1078   if (is32)
   1079     {
   1080       /* Allow all zeros or all ones in top 32-bits, so that
   1081 	 constant expressions like ~1 are permitted.  */
   1082       if (value >> 32 != 0 && value >> 32 != 0xffffffff)
   1083 	return FALSE;
   1084 
   1085       /* Replicate the 32 lower bits to the 32 upper bits.  */
   1086       value &= 0xffffffff;
   1087       value |= value << 32;
   1088     }
   1089 
   1090   imm_enc.imm = value;
   1091   imm_encoding = (const simd_imm_encoding *)
   1092     bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
   1093             sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
   1094   if (imm_encoding == NULL)
   1095     {
   1096       DEBUG_TRACE ("exit with FALSE");
   1097       return FALSE;
   1098     }
   1099   if (encoding != NULL)
   1100     *encoding = imm_encoding->encoding;
   1101   DEBUG_TRACE ("exit with TRUE");
   1102   return TRUE;
   1103 }
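
         /* Illustrative usage sketch (not part of the original source; the variable
            names are hypothetical):

              aarch64_insn enc;
              if (aarch64_logical_immediate_p (0x5555555555555555ULL, 0, &enc))
                insn |= enc << 10;

            0x5555...5555 is the 2-bit pattern "01" replicated across the register,
            so the call succeeds and ENC holds the standard N:immr:imms encoding
            (which logical-immediate instructions place in bits 22:10); a value such
            as 0x1234, which is not a replicated, rotated run of ones, is rejected
            and the function returns FALSE.  */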
   1104 
   1105 /* If 64-bit immediate IMM is in the format of
   1106    "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   1107    where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   1108    of value "abcdefgh".  Otherwise return -1.  */
   1109 int
   1110 aarch64_shrink_expanded_imm8 (uint64_t imm)
   1111 {
   1112   int i, ret;
   1113   uint32_t byte;
   1114 
   1115   ret = 0;
   1116   for (i = 0; i < 8; i++)
   1117     {
   1118       byte = (imm >> (8 * i)) & 0xff;
   1119       if (byte == 0xff)
   1120 	ret |= 1 << i;
   1121       else if (byte != 0x00)
   1122 	return -1;
   1123     }
   1124   return ret;
   1125 }
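
         /* Illustrative note (not part of the original source): the expanded value
            0xff00ff0000ff00ff has the 0xff pattern in bytes 7, 5, 2 and 0 (counting
            from the least significant byte), so aarch64_shrink_expanded_imm8
            returns 0xa5 (binary 10100101); a value containing any byte other than
            0x00 or 0xff makes it return -1.  */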
   1126 
   1127 /* Utility inline functions for operand_general_constraint_met_p.  */
   1128 
   1129 static inline void
   1130 set_error (aarch64_operand_error *mismatch_detail,
   1131 	   enum aarch64_operand_error_kind kind, int idx,
   1132 	   const char* error)
   1133 {
   1134   if (mismatch_detail == NULL)
   1135     return;
   1136   mismatch_detail->kind = kind;
   1137   mismatch_detail->index = idx;
   1138   mismatch_detail->error = error;
   1139 }
   1140 
   1141 static inline void
   1142 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
   1143 		  const char* error)
   1144 {
   1145   if (mismatch_detail == NULL)
   1146     return;
   1147   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
   1148 }
   1149 
   1150 static inline void
   1151 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1152 			int idx, int lower_bound, int upper_bound,
   1153 			const char* error)
   1154 {
   1155   if (mismatch_detail == NULL)
   1156     return;
   1157   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
   1158   mismatch_detail->data[0] = lower_bound;
   1159   mismatch_detail->data[1] = upper_bound;
   1160 }
   1161 
   1162 static inline void
   1163 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1164 			    int idx, int lower_bound, int upper_bound)
   1165 {
   1166   if (mismatch_detail == NULL)
   1167     return;
   1168   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1169 			  _("immediate value"));
   1170 }
   1171 
   1172 static inline void
   1173 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1174 			       int idx, int lower_bound, int upper_bound)
   1175 {
   1176   if (mismatch_detail == NULL)
   1177     return;
   1178   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1179 			  _("immediate offset"));
   1180 }
   1181 
   1182 static inline void
   1183 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1184 			      int idx, int lower_bound, int upper_bound)
   1185 {
   1186   if (mismatch_detail == NULL)
   1187     return;
   1188   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1189 			  _("register number"));
   1190 }
   1191 
   1192 static inline void
   1193 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1194 				 int idx, int lower_bound, int upper_bound)
   1195 {
   1196   if (mismatch_detail == NULL)
   1197     return;
   1198   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1199 			  _("register element index"));
   1200 }
   1201 
   1202 static inline void
   1203 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1204 				   int idx, int lower_bound, int upper_bound)
   1205 {
   1206   if (mismatch_detail == NULL)
   1207     return;
   1208   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1209 			  _("shift amount"));
   1210 }
   1211 
   1212 static inline void
   1213 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
   1214 		     int alignment)
   1215 {
   1216   if (mismatch_detail == NULL)
   1217     return;
   1218   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
   1219   mismatch_detail->data[0] = alignment;
   1220 }
   1221 
   1222 static inline void
   1223 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
   1224 		    int expected_num)
   1225 {
   1226   if (mismatch_detail == NULL)
   1227     return;
   1228   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
   1229   mismatch_detail->data[0] = expected_num;
   1230 }
   1231 
   1232 static inline void
   1233 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
   1234 		 const char* error)
   1235 {
   1236   if (mismatch_detail == NULL)
   1237     return;
   1238   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
   1239 }
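
         /* Illustrative note (not part of the original source): these helpers only
            record an error kind, an optional static string and a few integers; for
            example set_offset_out_of_range_error (detail, 1, -256, 255) stores
            AARCH64_OPDE_OUT_OF_RANGE with data[0] == -256 and data[1] == 255, and
            it is left to the client (assembler or disassembler) to turn that into
            a printable diagnostic.  */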
   1240 
   1241 /* General constraint checking based on operand code.
   1242 
   1243    Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   1244    as the IDXth operand of opcode OPCODE.  Otherwise return 0.
   1245 
   1246    This function has to be called after the qualifiers for all operands
   1247    have been resolved.
   1248 
    1249    The mismatch error message, if any, is returned in *MISMATCH_DETAIL upon
    1250    request, i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
    1251    of error messages during disassembly, where error messages are not
    1252    wanted.  We avoid the dynamic construction of error message strings
   1253    here (i.e. in libopcodes), as it is costly and complicated; instead, we
   1254    use a combination of error code, static string and some integer data to
   1255    represent an error.  */
   1256 
   1257 static int
   1258 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
   1259 				  enum aarch64_opnd type,
   1260 				  const aarch64_opcode *opcode,
   1261 				  aarch64_operand_error *mismatch_detail)
   1262 {
   1263   unsigned num;
   1264   unsigned char size;
   1265   int64_t imm;
   1266   const aarch64_opnd_info *opnd = opnds + idx;
   1267   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
   1268 
   1269   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
   1270 
   1271   switch (aarch64_operands[type].op_class)
   1272     {
   1273     case AARCH64_OPND_CLASS_INT_REG:
   1274       /* Check pair reg constraints for cas* instructions.  */
   1275       if (type == AARCH64_OPND_PAIRREG)
   1276 	{
   1277 	  assert (idx == 1 || idx == 3);
   1278 	  if (opnds[idx - 1].reg.regno % 2 != 0)
   1279 	    {
   1280 	      set_syntax_error (mismatch_detail, idx - 1,
   1281 				_("reg pair must start from even reg"));
   1282 	      return 0;
   1283 	    }
   1284 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
   1285 	    {
   1286 	      set_syntax_error (mismatch_detail, idx,
   1287 				_("reg pair must be contiguous"));
   1288 	      return 0;
   1289 	    }
   1290 	  break;
   1291 	}
   1292 
   1293       /* <Xt> may be optional in some IC and TLBI instructions.  */
   1294       if (type == AARCH64_OPND_Rt_SYS)
   1295 	{
   1296 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
   1297 			       == AARCH64_OPND_CLASS_SYSTEM));
   1298 	  if (opnds[1].present
   1299 	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1300 	    {
   1301 	      set_other_error (mismatch_detail, idx, _("extraneous register"));
   1302 	      return 0;
   1303 	    }
   1304 	  if (!opnds[1].present
   1305 	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
   1306 	    {
   1307 	      set_other_error (mismatch_detail, idx, _("missing register"));
   1308 	      return 0;
   1309 	    }
   1310 	}
   1311       switch (qualifier)
   1312 	{
   1313 	case AARCH64_OPND_QLF_WSP:
   1314 	case AARCH64_OPND_QLF_SP:
   1315 	  if (!aarch64_stack_pointer_p (opnd))
   1316 	    {
   1317 	      set_other_error (mismatch_detail, idx,
   1318 			       _("stack pointer register expected"));
   1319 	      return 0;
   1320 	    }
   1321 	  break;
   1322 	default:
   1323 	  break;
   1324 	}
   1325       break;
   1326 
   1327     case AARCH64_OPND_CLASS_COND:
   1328       if (type == AARCH64_OPND_COND1
   1329 	  && (opnds[idx].cond->value & 0xe) == 0xe)
   1330 	{
    1331 	  /* Do not allow AL or NV.  */
   1332 	  set_syntax_error (mismatch_detail, idx, NULL);
   1333 	}
   1334       break;
   1335 
   1336     case AARCH64_OPND_CLASS_ADDRESS:
   1337       /* Check writeback.  */
   1338       switch (opcode->iclass)
   1339 	{
   1340 	case ldst_pos:
   1341 	case ldst_unscaled:
   1342 	case ldstnapair_offs:
   1343 	case ldstpair_off:
   1344 	case ldst_unpriv:
   1345 	  if (opnd->addr.writeback == 1)
   1346 	    {
   1347 	      set_syntax_error (mismatch_detail, idx,
   1348 				_("unexpected address writeback"));
   1349 	      return 0;
   1350 	    }
   1351 	  break;
   1352 	case ldst_imm9:
   1353 	case ldstpair_indexed:
   1354 	case asisdlsep:
   1355 	case asisdlsop:
   1356 	  if (opnd->addr.writeback == 0)
   1357 	    {
   1358 	      set_syntax_error (mismatch_detail, idx,
   1359 				_("address writeback expected"));
   1360 	      return 0;
   1361 	    }
   1362 	  break;
   1363 	default:
   1364 	  assert (opnd->addr.writeback == 0);
   1365 	  break;
   1366 	}
   1367       switch (type)
   1368 	{
   1369 	case AARCH64_OPND_ADDR_SIMM7:
    1370 	  /* Scaled signed 7-bit immediate offset.  */
   1371 	  /* Get the size of the data element that is accessed, which may be
   1372 	     different from that of the source register size,
   1373 	     e.g. in strb/ldrb.  */
   1374 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1375 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
   1376 	    {
   1377 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1378 					     -64 * size, 63 * size);
   1379 	      return 0;
   1380 	    }
   1381 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   1382 	    {
   1383 	      set_unaligned_error (mismatch_detail, idx, size);
   1384 	      return 0;
   1385 	    }
   1386 	  break;
   1387 	case AARCH64_OPND_ADDR_SIMM9:
    1388 	  /* Unscaled signed 9-bit immediate offset.  */
   1389 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   1390 	    {
   1391 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
   1392 	      return 0;
   1393 	    }
   1394 	  break;
   1395 
   1396 	case AARCH64_OPND_ADDR_SIMM9_2:
    1397 	  /* Unscaled signed 9-bit immediate offset, which has to be negative
    1398 	     or unaligned.  */
   1399 	  size = aarch64_get_qualifier_esize (qualifier);
   1400 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
   1401 	       && !value_aligned_p (opnd->addr.offset.imm, size))
   1402 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
   1403 	    return 1;
   1404 	  set_other_error (mismatch_detail, idx,
   1405 			   _("negative or unaligned offset expected"));
   1406 	  return 0;
   1407 
   1408 	case AARCH64_OPND_SIMD_ADDR_POST:
   1409 	  /* AdvSIMD load/store multiple structures, post-index.  */
   1410 	  assert (idx == 1);
   1411 	  if (opnd->addr.offset.is_reg)
   1412 	    {
   1413 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
   1414 		return 1;
   1415 	      else
   1416 		{
   1417 		  set_other_error (mismatch_detail, idx,
   1418 				   _("invalid register offset"));
   1419 		  return 0;
   1420 		}
   1421 	    }
   1422 	  else
   1423 	    {
   1424 	      const aarch64_opnd_info *prev = &opnds[idx-1];
   1425 	      unsigned num_bytes; /* total number of bytes transferred.  */
   1426 	      /* The opcode dependent area stores the number of elements in
   1427 		 each structure to be loaded/stored.  */
   1428 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
   1429 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
    1430 		/* Special handling of loading a single structure to all lanes.  */
   1431 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
   1432 		  * aarch64_get_qualifier_esize (prev->qualifier);
   1433 	      else
   1434 		num_bytes = prev->reglist.num_regs
   1435 		  * aarch64_get_qualifier_esize (prev->qualifier)
   1436 		  * aarch64_get_qualifier_nelem (prev->qualifier);
   1437 	      if ((int) num_bytes != opnd->addr.offset.imm)
   1438 		{
   1439 		  set_other_error (mismatch_detail, idx,
   1440 				   _("invalid post-increment amount"));
   1441 		  return 0;
   1442 		}
   1443 	    }
   1444 	  break;
   1445 
   1446 	case AARCH64_OPND_ADDR_REGOFF:
   1447 	  /* Get the size of the data element that is accessed, which may be
   1448 	     different from that of the source register size,
   1449 	     e.g. in strb/ldrb.  */
   1450 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1451 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
   1452 	  if (opnd->shifter.amount != 0
   1453 	      && opnd->shifter.amount != (int)get_logsz (size))
   1454 	    {
   1455 	      set_other_error (mismatch_detail, idx,
   1456 			       _("invalid shift amount"));
   1457 	      return 0;
   1458 	    }
   1459 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
   1460 	     operators.  */
   1461 	  switch (opnd->shifter.kind)
   1462 	    {
   1463 	    case AARCH64_MOD_UXTW:
   1464 	    case AARCH64_MOD_LSL:
   1465 	    case AARCH64_MOD_SXTW:
   1466 	    case AARCH64_MOD_SXTX: break;
   1467 	    default:
   1468 	      set_other_error (mismatch_detail, idx,
   1469 			       _("invalid extend/shift operator"));
   1470 	      return 0;
   1471 	    }
   1472 	  break;
   1473 
   1474 	case AARCH64_OPND_ADDR_UIMM12:
   1475 	  imm = opnd->addr.offset.imm;
   1476 	  /* Get the size of the data element that is accessed, which may be
   1477 	     different from that of the source register size,
   1478 	     e.g. in strb/ldrb.  */
   1479 	  size = aarch64_get_qualifier_esize (qualifier);
   1480 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
   1481 	    {
   1482 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1483 					     0, 4095 * size);
   1484 	      return 0;
   1485 	    }
   1486 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   1487 	    {
   1488 	      set_unaligned_error (mismatch_detail, idx, size);
   1489 	      return 0;
   1490 	    }
   1491 	  break;
   1492 
   1493 	case AARCH64_OPND_ADDR_PCREL14:
   1494 	case AARCH64_OPND_ADDR_PCREL19:
   1495 	case AARCH64_OPND_ADDR_PCREL21:
   1496 	case AARCH64_OPND_ADDR_PCREL26:
   1497 	  imm = opnd->imm.value;
   1498 	  if (operand_need_shift_by_two (get_operand_from_code (type)))
   1499 	    {
    1500 	      /* The offset value in a PC-relative branch instruction is always
   1501 		 4-byte aligned and is encoded without the lowest 2 bits.  */
   1502 	      if (!value_aligned_p (imm, 4))
   1503 		{
   1504 		  set_unaligned_error (mismatch_detail, idx, 4);
   1505 		  return 0;
   1506 		}
   1507 	      /* Right shift by 2 so that we can carry out the following check
   1508 		 canonically.  */
   1509 	      imm >>= 2;
   1510 	    }
   1511 	  size = get_operand_fields_width (get_operand_from_code (type));
   1512 	  if (!value_fit_signed_field_p (imm, size))
   1513 	    {
   1514 	      set_other_error (mismatch_detail, idx,
   1515 			       _("immediate out of range"));
   1516 	      return 0;
   1517 	    }
   1518 	  break;
   1519 
   1520 	default:
   1521 	  break;
   1522 	}
   1523       break;
   1524 
   1525     case AARCH64_OPND_CLASS_SIMD_REGLIST:
   1526       if (type == AARCH64_OPND_LEt)
   1527 	{
   1528 	  /* Get the upper bound for the element index.  */
   1529 	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
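	  /* E.g. a .b element index must be in [0, 15], a .s index in
	     [0, 3] and a .d index in [0, 1].  */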
   1530 	  if (!value_in_range_p (opnd->reglist.index, 0, num))
   1531 	    {
   1532 	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   1533 	      return 0;
   1534 	    }
   1535 	}
   1536       /* The opcode dependent area stores the number of elements in
   1537 	 each structure to be loaded/stored.  */
   1538       num = get_opcode_dependent_value (opcode);
   1539       switch (type)
   1540 	{
   1541 	case AARCH64_OPND_LVt:
   1542 	  assert (num >= 1 && num <= 4);
   1543 	  /* Except for LD1/ST1, the number of registers must equal the number
   1544 	     of structure elements.  */
   1545 	  if (num != 1 && opnd->reglist.num_regs != num)
   1546 	    {
   1547 	      set_reg_list_error (mismatch_detail, idx, num);
   1548 	      return 0;
   1549 	    }
   1550 	  break;
   1551 	case AARCH64_OPND_LVt_AL:
   1552 	case AARCH64_OPND_LEt:
   1553 	  assert (num >= 1 && num <= 4);
   1554 	  /* The number of registers must equal the number of structure
   1555 	     elements.  */
   1556 	  if (opnd->reglist.num_regs != num)
   1557 	    {
   1558 	      set_reg_list_error (mismatch_detail, idx, num);
   1559 	      return 0;
   1560 	    }
   1561 	  break;
   1562 	default:
   1563 	  break;
   1564 	}
   1565       break;
   1566 
   1567     case AARCH64_OPND_CLASS_IMMEDIATE:
   1568       /* Constraint check on immediate operand.  */
   1569       imm = opnd->imm.value;
   1570       /* E.g. imm_0_31 constrains value to be 0..31.  */
   1571       if (qualifier_value_in_range_constraint_p (qualifier)
   1572 	  && !value_in_range_p (imm, get_lower_bound (qualifier),
   1573 				get_upper_bound (qualifier)))
   1574 	{
   1575 	  set_imm_out_of_range_error (mismatch_detail, idx,
   1576 				      get_lower_bound (qualifier),
   1577 				      get_upper_bound (qualifier));
   1578 	  return 0;
   1579 	}
   1580 
   1581       switch (type)
   1582 	{
   1583 	case AARCH64_OPND_AIMM:
   1584 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   1585 	    {
   1586 	      set_other_error (mismatch_detail, idx,
   1587 			       _("invalid shift operator"));
   1588 	      return 0;
   1589 	    }
   1590 	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
   1591 	    {
   1592 	      set_other_error (mismatch_detail, idx,
   1593 			       _("shift amount expected to be 0 or 12"));
   1594 	      return 0;
   1595 	    }
   1596 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
   1597 	    {
   1598 	      set_other_error (mismatch_detail, idx,
   1599 			       _("immediate out of range"));
   1600 	      return 0;
   1601 	    }
   1602 	  break;
   1603 
   1604 	case AARCH64_OPND_HALF:
   1605 	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
   1606 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   1607 	    {
   1608 	      set_other_error (mismatch_detail, idx,
   1609 			       _("invalid shift operator"));
   1610 	      return 0;
   1611 	    }
   1612 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   1613 	  if (!value_aligned_p (opnd->shifter.amount, 16))
   1614 	    {
   1615 	      set_other_error (mismatch_detail, idx,
   1616 			       _("shift amount should be a multiple of 16"));
   1617 	      return 0;
   1618 	    }
   1619 	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
   1620 	    {
   1621 	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
   1622 						 0, size * 8 - 16);
   1623 	      return 0;
   1624 	    }
   1625 	  if (opnd->imm.value < 0)
   1626 	    {
   1627 	      set_other_error (mismatch_detail, idx,
   1628 			       _("negative immediate value not allowed"));
   1629 	      return 0;
   1630 	    }
   1631 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
   1632 	    {
   1633 	      set_other_error (mismatch_detail, idx,
   1634 			       _("immediate out of range"));
   1635 	      return 0;
   1636 	    }
   1637 	  break;
   1638 
   1639 	case AARCH64_OPND_IMM_MOV:
   1640 	    {
   1641 	      int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
   1642 	      imm = opnd->imm.value;
   1643 	      assert (idx == 1);
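	      /* MOV <Rd>, #<imm> is an alias of MOVZ (OP_MOV_IMM_WIDE),
		 MOVN (OP_MOV_IMM_WIDEN, checked with the immediate inverted)
		 or ORR <Rd>, <R>ZR, #<imm> (OP_MOV_IMM_LOG); accept the
		 immediate if the corresponding form can encode it.  */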
   1644 	      switch (opcode->op)
   1645 		{
   1646 		case OP_MOV_IMM_WIDEN:
   1647 		  imm = ~imm;
   1648 		  /* Fall through...  */
   1649 		case OP_MOV_IMM_WIDE:
   1650 		  if (!aarch64_wide_constant_p (imm, is32, NULL))
   1651 		    {
   1652 		      set_other_error (mismatch_detail, idx,
   1653 				       _("immediate out of range"));
   1654 		      return 0;
   1655 		    }
   1656 		  break;
   1657 		case OP_MOV_IMM_LOG:
   1658 		  if (!aarch64_logical_immediate_p (imm, is32, NULL))
   1659 		    {
   1660 		      set_other_error (mismatch_detail, idx,
   1661 				       _("immediate out of range"));
   1662 		      return 0;
   1663 		    }
   1664 		  break;
   1665 		default:
   1666 		  assert (0);
   1667 		  return 0;
   1668 		}
   1669 	    }
   1670 	  break;
   1671 
   1672 	case AARCH64_OPND_NZCV:
   1673 	case AARCH64_OPND_CCMP_IMM:
   1674 	case AARCH64_OPND_EXCEPTION:
   1675 	case AARCH64_OPND_UIMM4:
   1676 	case AARCH64_OPND_UIMM7:
   1677 	case AARCH64_OPND_UIMM3_OP1:
   1678 	case AARCH64_OPND_UIMM3_OP2:
   1679 	  size = get_operand_fields_width (get_operand_from_code (type));
   1680 	  assert (size < 32);
   1681 	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
   1682 	    {
   1683 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   1684 					  (1 << size) - 1);
   1685 	      return 0;
   1686 	    }
   1687 	  break;
   1688 
   1689 	case AARCH64_OPND_WIDTH:
   1690 	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
   1691 		  && opnds[0].type == AARCH64_OPND_Rd);
   1692 	  size = get_upper_bound (qualifier);
   1693 	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
   1694 	    /* lsb+width <= reg.size  */
   1695 	    {
   1696 	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
   1697 					  size - opnds[idx-1].imm.value);
   1698 	      return 0;
   1699 	    }
   1700 	  break;
   1701 
   1702 	case AARCH64_OPND_LIMM:
   1703 	    {
   1704 	      int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
   1705 	      uint64_t uimm = opnd->imm.value;
   1706 	      if (opcode->op == OP_BIC)
   1707 		uimm = ~uimm;
   1708 	      if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
   1709 		{
   1710 		  set_other_error (mismatch_detail, idx,
   1711 				   _("immediate out of range"));
   1712 		  return 0;
   1713 		}
   1714 	    }
   1715 	  break;
   1716 
   1717 	case AARCH64_OPND_IMM0:
   1718 	case AARCH64_OPND_FPIMM0:
   1719 	  if (opnd->imm.value != 0)
   1720 	    {
   1721 	      set_other_error (mismatch_detail, idx,
   1722 			       _("immediate zero expected"));
   1723 	      return 0;
   1724 	    }
   1725 	  break;
   1726 
   1727 	case AARCH64_OPND_SHLL_IMM:
   1728 	  assert (idx == 2);
   1729 	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
   1730 	  if (opnd->imm.value != size)
   1731 	    {
   1732 	      set_other_error (mismatch_detail, idx,
   1733 			       _("invalid shift amount"));
   1734 	      return 0;
   1735 	    }
   1736 	  break;
   1737 
   1738 	case AARCH64_OPND_IMM_VLSL:
   1739 	  size = aarch64_get_qualifier_esize (qualifier);
   1740 	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
   1741 	    {
   1742 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   1743 					  size * 8 - 1);
   1744 	      return 0;
   1745 	    }
   1746 	  break;
   1747 
   1748 	case AARCH64_OPND_IMM_VLSR:
   1749 	  size = aarch64_get_qualifier_esize (qualifier);
   1750 	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
   1751 	    {
   1752 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
   1753 	      return 0;
   1754 	    }
   1755 	  break;
   1756 
   1757 	case AARCH64_OPND_SIMD_IMM:
   1758 	case AARCH64_OPND_SIMD_IMM_SFT:
   1759 	  /* Qualifier check.  */
   1760 	  switch (qualifier)
   1761 	    {
   1762 	    case AARCH64_OPND_QLF_LSL:
   1763 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
   1764 		{
   1765 		  set_other_error (mismatch_detail, idx,
   1766 				   _("invalid shift operator"));
   1767 		  return 0;
   1768 		}
   1769 	      break;
   1770 	    case AARCH64_OPND_QLF_MSL:
   1771 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
   1772 		{
   1773 		  set_other_error (mismatch_detail, idx,
   1774 				   _("invalid shift operator"));
   1775 		  return 0;
   1776 		}
   1777 	      break;
   1778 	    case AARCH64_OPND_QLF_NIL:
   1779 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   1780 		{
   1781 		  set_other_error (mismatch_detail, idx,
   1782 				   _("shift is not permitted"));
   1783 		  return 0;
   1784 		}
   1785 	      break;
   1786 	    default:
   1787 	      assert (0);
   1788 	      return 0;
   1789 	    }
   1790 	  /* Is the immediate valid?  */
   1791 	  assert (idx == 1);
   1792 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
   1793 	    {
   1794 	      /* uimm8 or simm8 */
   1795 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
   1796 		{
   1797 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
   1798 		  return 0;
   1799 		}
   1800 	    }
   1801 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
   1802 	    {
   1803 	      /* uimm64 is not
   1804 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
   1805 		 ffffffffgggggggghhhhhhhh'.  */
   1806 	      set_other_error (mismatch_detail, idx,
   1807 			       _("invalid value for immediate"));
   1808 	      return 0;
   1809 	    }
   1810 	  /* Is the shift amount valid?  */
   1811 	  switch (opnd->shifter.kind)
   1812 	    {
   1813 	    case AARCH64_MOD_LSL:
   1814 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
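	      /* E.g. with .4s elements (element size 4) the accepted LSL
		 amounts are 0, 8, 16 and 24.  */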
   1815 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
   1816 		{
   1817 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
   1818 						     (size - 1) * 8);
   1819 		  return 0;
   1820 		}
   1821 	      if (!value_aligned_p (opnd->shifter.amount, 8))
   1822 		{
   1823 		  set_unaligned_error (mismatch_detail, idx, 8);
   1824 		  return 0;
   1825 		}
   1826 	      break;
   1827 	    case AARCH64_MOD_MSL:
   1828 	      /* Only 8 and 16 are valid shift amounts.  */
   1829 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
   1830 		{
   1831 		  set_other_error (mismatch_detail, idx,
   1832 				   _("shift amount expected to be 8 or 16"));
   1833 		  return 0;
   1834 		}
   1835 	      break;
   1836 	    default:
   1837 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   1838 		{
   1839 		  set_other_error (mismatch_detail, idx,
   1840 				   _("invalid shift operator"));
   1841 		  return 0;
   1842 		}
   1843 	      break;
   1844 	    }
   1845 	  break;
   1846 
   1847 	case AARCH64_OPND_FPIMM:
   1848 	case AARCH64_OPND_SIMD_FPIMM:
   1849 	  if (opnd->imm.is_fp == 0)
   1850 	    {
   1851 	      set_other_error (mismatch_detail, idx,
   1852 			       _("floating-point immediate expected"));
   1853 	      return 0;
   1854 	    }
   1855 	  /* The value is expected to be an 8-bit floating-point constant with
   1856 	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
   1857 	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
   1858 	     instruction).  */
   1859 	  if (!value_in_range_p (opnd->imm.value, 0, 255))
   1860 	    {
   1861 	      set_other_error (mismatch_detail, idx,
   1862 			       _("immediate out of range"));
   1863 	      return 0;
   1864 	    }
   1865 	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
   1866 	    {
   1867 	      set_other_error (mismatch_detail, idx,
   1868 			       _("invalid shift operator"));
   1869 	      return 0;
   1870 	    }
   1871 	  break;
   1872 
   1873 	default:
   1874 	  break;
   1875 	}
   1876       break;
   1877 
   1878     case AARCH64_OPND_CLASS_CP_REG:
   1879       /* Cn or Cm: 4-bit opcode field named for historical reasons.
   1880 	 Valid range: C0 - C15.  */
   1881       if (opnd->reg.regno > 15)
   1882 	{
   1883 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   1884 	  return 0;
   1885 	}
   1886       break;
   1887 
   1888     case AARCH64_OPND_CLASS_SYSTEM:
   1889       switch (type)
   1890 	{
   1891 	case AARCH64_OPND_PSTATEFIELD:
   1892 	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
   1893 	  /* MSR UAO, #uimm4
   1894 	     MSR PAN, #uimm4
   1895 	     The immediate must be #0 or #1.  */
   1896 	  if ((opnd->pstatefield == 0x03	/* UAO.  */
   1897 	       || opnd->pstatefield == 0x04)	/* PAN.  */
   1898 	      && opnds[1].imm.value > 1)
   1899 	    {
   1900 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
   1901 	      return 0;
   1902 	    }
   1903 	  /* MSR SPSel, #uimm4
   1904 	     Uses uimm4 as a control value to select the stack pointer: if
   1905 	     bit 0 is set it selects the current exception level's stack
   1906 	     pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
   1907 	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
   1908 	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
   1909 	    {
   1910 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
   1911 	      return 0;
   1912 	    }
   1913 	  break;
   1914 	default:
   1915 	  break;
   1916 	}
   1917       break;
   1918 
   1919     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
   1920       /* Get the upper bound for the element index.  */
   1921       num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
   1922       /* Index out-of-range.  */
   1923       if (!value_in_range_p (opnd->reglane.index, 0, num))
   1924 	{
   1925 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   1926 	  return 0;
   1927 	}
   1928       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
   1929 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
   1930 	 number is encoded in "size:M:Rm":
   1931 	 size	<Vm>
   1932 	 00		RESERVED
   1933 	 01		0:Rm
   1934 	 10		M:Rm
   1935 	 11		RESERVED  */
   1936       if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
   1937 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
   1938 	{
   1939 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   1940 	  return 0;
   1941 	}
   1942       break;
   1943 
   1944     case AARCH64_OPND_CLASS_MODIFIED_REG:
   1945       assert (idx == 1 || idx == 2);
   1946       switch (type)
   1947 	{
   1948 	case AARCH64_OPND_Rm_EXT:
   1949 	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
   1950 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
   1951 	    {
   1952 	      set_other_error (mismatch_detail, idx,
   1953 			       _("extend operator expected"));
   1954 	      return 0;
   1955 	    }
   1956 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
   1957 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
   1958 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
   1959 	     case.  */
   1960 	  if (!aarch64_stack_pointer_p (opnds + 0)
   1961 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
   1962 	    {
   1963 	      if (!opnd->shifter.operator_present)
   1964 		{
   1965 		  set_other_error (mismatch_detail, idx,
   1966 				   _("missing extend operator"));
   1967 		  return 0;
   1968 		}
   1969 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
   1970 		{
   1971 		  set_other_error (mismatch_detail, idx,
   1972 				   _("'LSL' operator not allowed"));
   1973 		  return 0;
   1974 		}
   1975 	    }
   1976 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
   1977 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
   1978 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
   1979 	    {
   1980 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
   1981 	      return 0;
   1982 	    }
   1983 	  /* In the 64-bit form, the final register operand is written as Wm
   1984 	     for all but the (possibly omitted) UXTX/LSL and SXTX
   1985 	     operators.
   1986 	     N.B. GAS allows X register to be used with any operator as a
   1987 	     programming convenience.  */
   1988 	  if (qualifier == AARCH64_OPND_QLF_X
   1989 	      && opnd->shifter.kind != AARCH64_MOD_LSL
   1990 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
   1991 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
   1992 	    {
   1993 	      set_other_error (mismatch_detail, idx, _("W register expected"));
   1994 	      return 0;
   1995 	    }
   1996 	  break;
   1997 
   1998 	case AARCH64_OPND_Rm_SFT:
   1999 	  /* ROR is not available to the shifted register operand in
   2000 	     arithmetic instructions.  */
   2001 	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
   2002 	    {
   2003 	      set_other_error (mismatch_detail, idx,
   2004 			       _("shift operator expected"));
   2005 	      return 0;
   2006 	    }
   2007 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
   2008 	      && opcode->iclass != log_shift)
   2009 	    {
   2010 	      set_other_error (mismatch_detail, idx,
   2011 			       _("'ROR' operator not allowed"));
   2012 	      return 0;
   2013 	    }
   2014 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
   2015 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
   2016 	    {
   2017 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
   2018 	      return 0;
   2019 	    }
   2020 	  break;
   2021 
   2022 	default:
   2023 	  break;
   2024 	}
   2025       break;
   2026 
   2027     default:
   2028       break;
   2029     }
   2030 
   2031   return 1;
   2032 }
   2033 
   2034 /* Main entrypoint for the operand constraint checking.
   2035 
   2036    Return 1 if operands of *INST meet the constraint applied by the operand
   2037    codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   2038    not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   2039    adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   2040    with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   2041    error kind when it is notified that an instruction does not pass the check).
   2042 
   2043    Un-determined operand qualifiers may get established during the process.  */
   2044 
   2045 int
   2046 aarch64_match_operands_constraint (aarch64_inst *inst,
   2047 				   aarch64_operand_error *mismatch_detail)
   2048 {
   2049   int i;
   2050 
   2051   DEBUG_TRACE ("enter");
   2052 
   2053   /* Match operands' qualifiers.
   2054      *INST has already had qualifiers established for some, if not all, of
   2055      its operands; we need to find out whether these established
   2056      qualifiers match one of the qualifier sequences in
   2057      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
   2058      the corresponding qualifier from that sequence.
   2059      Only basic operand constraint checking is done here; the more thorough
   2060      constraint checking will be carried out by operand_general_constraint_met_p,
   2061      which has to be called after this in order to get all of the operands'
   2062      qualifiers established.  */
   2063   if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
   2064     {
   2065       DEBUG_TRACE ("FAIL on operand qualifier matching");
   2066       if (mismatch_detail)
   2067 	{
   2068 	  /* Return an error type to indicate that it is a qualifier
   2069 	     matching failure; we don't care which operand it is, as there
   2070 	     is enough information in the opcode table to reproduce it.  */
   2071 	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
   2072 	  mismatch_detail->index = -1;
   2073 	  mismatch_detail->error = NULL;
   2074 	}
   2075       return 0;
   2076     }
   2077 
   2078   /* Match operands' constraint.  */
   2079   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2080     {
   2081       enum aarch64_opnd type = inst->opcode->operands[i];
   2082       if (type == AARCH64_OPND_NIL)
   2083 	break;
   2084       if (inst->operands[i].skip)
   2085 	{
   2086 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
   2087 	  continue;
   2088 	}
   2089       if (operand_general_constraint_met_p (inst->operands, i, type,
   2090 					    inst->opcode, mismatch_detail) == 0)
   2091 	{
   2092 	  DEBUG_TRACE ("FAIL on operand %d", i);
   2093 	  return 0;
   2094 	}
   2095     }
   2096 
   2097   DEBUG_TRACE ("PASS");
   2098 
   2099   return 1;
   2100 }
   2101 
   2102 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   2103    Also updates the TYPE of each INST->OPERANDS with the corresponding
   2104    value of OPCODE->OPERANDS.
   2105 
   2106    Note that some operand qualifiers may need to be manually cleared by
   2107    the caller before it further calls aarch64_opcode_encode; doing so
   2108    helps the qualifier matching facilities work
   2109    properly.  */
   2110 
   2111 const aarch64_opcode*
   2112 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
   2113 {
   2114   int i;
   2115   const aarch64_opcode *old = inst->opcode;
   2116 
   2117   inst->opcode = opcode;
   2118 
   2119   /* Update the operand types.  */
   2120   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2121     {
   2122       inst->operands[i].type = opcode->operands[i];
   2123       if (opcode->operands[i] == AARCH64_OPND_NIL)
   2124 	break;
   2125     }
   2126 
   2127   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
   2128 
   2129   return old;
   2130 }
   2131 
   2132 int
   2133 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
   2134 {
   2135   int i;
   2136   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2137     if (operands[i] == operand)
   2138       return i;
   2139     else if (operands[i] == AARCH64_OPND_NIL)
   2140       break;
   2141   return -1;
   2142 }
   2143 
   2144 /* [0][0]  32-bit integer regs with sp   Wn
   2146    [0][1]  64-bit integer regs with sp   Xn  sf=1
   2147    [1][0]  32-bit integer regs with #0   Wn
   2148    [1][1]  64-bit integer regs with #0   Xn  sf=1 */
   2149 static const char *int_reg[2][2][32] = {
   2150 #define R32 "w"
   2151 #define R64 "x"
   2152   { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
   2153       R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
   2154       R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
   2155       R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30",    "wsp" },
   2156     { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
   2157       R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
   2158       R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
   2159       R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30",     "sp" } },
   2160   { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
   2161       R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
   2162       R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
   2163       R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
   2164     { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
   2165       R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
   2166       R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
   2167       R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
   2168 #undef R64
   2169 #undef R32
   2170 };
   2171 
   2172 /* Return the integer register name.
   2173    If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
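/* E.g. int_reg[0][1][31] is "sp" while int_reg[1][1][31] is "xzr".  */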
   2174 
   2175 static inline const char *
   2176 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
   2177 {
   2178   const int has_zr = sp_reg_p ? 0 : 1;
   2179   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
   2180   return int_reg[has_zr][is_64][regno];
   2181 }
   2182 
   2183 /* Like get_int_reg_name, but IS_64 is always 1.  */
   2184 
   2185 static inline const char *
   2186 get_64bit_int_reg_name (int regno, int sp_reg_p)
   2187 {
   2188   const int has_zr = sp_reg_p ? 0 : 1;
   2189   return int_reg[has_zr][1][regno];
   2190 }
   2191 
   2192 /* Types for expanding an encoded 8-bit value to a floating-point value.  */
   2193 
   2194 typedef union
   2195 {
   2196   uint64_t i;
   2197   double   d;
   2198 } double_conv_t;
   2199 
   2200 typedef union
   2201 {
   2202   uint32_t i;
   2203   float    f;
   2204 } single_conv_t;
   2205 
   2206 typedef union
   2207 {
   2208   uint32_t i;
   2209   float    f;
   2210 } half_conv_t;
   2211 
   2212 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   2213    normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   2214    (depending on the type of the instruction).  IMM8 will be expanded to a
   2215    single-precision floating-point value (SIZE == 4) or a double-precision
   2216    floating-point value (SIZE == 8).  A half-precision floating-point value
   2217    (SIZE == 2) is expanded to a single-precision floating-point value.  The
   2218    expanded value is returned.  */
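/* For example, IMM8 == 0x70 expands (for SIZE == 4) to 0x3f800000, i.e.
   single-precision 1.0; this is the encoding used by FMOV <Sd>, #1.0.  */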
   2219 
   2220 static uint64_t
   2221 expand_fp_imm (int size, uint32_t imm8)
   2222 {
   2223   uint64_t imm = 0;
   2224   uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
   2225 
   2226   imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
   2227   imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
   2228   imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
   2229   imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
   2230     | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
   2231   if (size == 8)
   2232     {
   2233       imm = (imm8_7 << (63-32))		/* imm8<7>  */
   2234 	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
   2235 	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
   2236 	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
   2237 	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
   2238       imm <<= 32;
   2239     }
   2240   else if (size == 4 || size == 2)
   2241     {
   2242       imm = (imm8_7 << 31)	/* imm8<7>              */
   2243 	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
   2244 	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
   2245 	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
   2246     }
   2247   else
   2248     {
   2249       /* An unsupported size.  */
   2250       assert (0);
   2251     }
   2252 
   2253   return imm;
   2254 }
   2255 
   2256 /* Produce the string representation of the register list operand *OPND
   2257    in the buffer pointed by BUF of size SIZE.  */
   2258 static void
   2259 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
   2260 {
   2261   const int num_regs = opnd->reglist.num_regs;
   2262   const int first_reg = opnd->reglist.first_regno;
   2263   const int last_reg = (first_reg + num_regs - 1) & 0x1f;
   2264   const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
   2265   char tb[8];	/* Temporary buffer.  */
   2266 
   2267   assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
   2268   assert (num_regs >= 1 && num_regs <= 4);
   2269 
   2270   /* Prepare the index if any.  */
   2271   if (opnd->reglist.has_index)
   2272     snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
   2273   else
   2274     tb[0] = '\0';
   2275 
   2276   /* The hyphenated form is preferred for disassembly if there are
   2277      more than two registers in the list, and the register numbers
   2278      are monotonically increasing in increments of one.  */
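  /* E.g. a list of four registers starting at V1 prints as {v1.4s-v4.4s},
     while one that wraps round (e.g. starting at V30) prints each register
     explicitly.  */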
   2279   if (num_regs > 2 && last_reg > first_reg)
   2280     snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
   2281 	      last_reg, qlf_name, tb);
   2282   else
   2283     {
   2284       const int reg0 = first_reg;
   2285       const int reg1 = (first_reg + 1) & 0x1f;
   2286       const int reg2 = (first_reg + 2) & 0x1f;
   2287       const int reg3 = (first_reg + 3) & 0x1f;
   2288 
   2289       switch (num_regs)
   2290 	{
   2291 	case 1:
   2292 	  snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
   2293 	  break;
   2294 	case 2:
   2295 	  snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
   2296 		    reg1, qlf_name, tb);
   2297 	  break;
   2298 	case 3:
   2299 	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
   2300 		    reg1, qlf_name, reg2, qlf_name, tb);
   2301 	  break;
   2302 	case 4:
   2303 	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
   2304 		    reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
   2305 		    reg3, qlf_name, tb);
   2306 	  break;
   2307 	}
   2308     }
   2309 }
   2310 
   2311 /* Produce the string representation of the register offset address operand
   2312    *OPND in the buffer pointed by BUF of size SIZE.  */
   2313 static void
   2314 print_register_offset_address (char *buf, size_t size,
   2315 			       const aarch64_opnd_info *opnd)
   2316 {
   2317   char tb[16];			/* Temporary buffer.  */
   2318   bfd_boolean lsl_p = FALSE;	/* Is LSL shift operator?  */
   2319   bfd_boolean wm_p = FALSE;	/* Should Rm be Wm?  */
   2320   bfd_boolean print_extend_p = TRUE;
   2321   bfd_boolean print_amount_p = TRUE;
   2322   const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
   2323 
   2324   switch (opnd->shifter.kind)
   2325     {
   2326     case AARCH64_MOD_UXTW: wm_p = TRUE; break;
   2327     case AARCH64_MOD_LSL : lsl_p = TRUE; break;
   2328     case AARCH64_MOD_SXTW: wm_p = TRUE; break;
   2329     case AARCH64_MOD_SXTX: break;
   2330     default: assert (0);
   2331     }
   2332 
   2333   if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
   2334 				|| !opnd->shifter.amount_present))
   2335     {
   2336       /* Don't print the shift/extend amount when the amount is zero and
   2337          it is not the special case of an 8-bit load/store instruction.  */
   2338       print_amount_p = FALSE;
   2339       /* Likewise, no need to print the shift operator LSL in such a
   2340 	 situation.  */
   2341       if (lsl_p)
   2342 	print_extend_p = FALSE;
   2343     }
   2344 
   2345   /* Prepare for the extend/shift.  */
   2346   if (print_extend_p)
   2347     {
   2348       if (print_amount_p)
   2349 	snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
   2350       else
   2351 	snprintf (tb, sizeof (tb), ",%s", shift_name);
   2352     }
   2353   else
   2354     tb[0] = '\0';
   2355 
   2356   snprintf (buf, size, "[%s,%s%s]",
   2357 	    get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   2358 	    get_int_reg_name (opnd->addr.offset.regno,
   2359 			      wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
   2360 			      0 /* sp_reg_p */),
   2361 	    tb);
   2362 }
   2363 
   2364 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   2365    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   2366    PC, PCREL_P and ADDRESS are used to pass in and return information about
   2367    the PC-relative address calculation, where the PC value is passed in
   2368    PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   2369    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   2370    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
   2371 
   2372    The function serves both the disassembler and the assembler diagnostics
   2373    issuer, which is the reason why it lives in this file.  */
   2374 
   2375 void
   2376 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
   2377 		       const aarch64_opcode *opcode,
   2378 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
   2379 		       bfd_vma *address)
   2380 {
   2381   int i;
   2382   const char *name = NULL;
   2383   const aarch64_opnd_info *opnd = opnds + idx;
   2384   enum aarch64_modifier_kind kind;
   2385   uint64_t addr;
   2386 
   2387   buf[0] = '\0';
   2388   if (pcrel_p)
   2389     *pcrel_p = 0;
   2390 
   2391   switch (opnd->type)
   2392     {
   2393     case AARCH64_OPND_Rd:
   2394     case AARCH64_OPND_Rn:
   2395     case AARCH64_OPND_Rm:
   2396     case AARCH64_OPND_Rt:
   2397     case AARCH64_OPND_Rt2:
   2398     case AARCH64_OPND_Rs:
   2399     case AARCH64_OPND_Ra:
   2400     case AARCH64_OPND_Rt_SYS:
   2401     case AARCH64_OPND_PAIRREG:
   2402       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
   2403 	 the <ic_op>, therefore we use opnd->present to override the
   2404 	 generic optional-ness information.  */
   2405       if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
   2406 	break;
   2407       /* Omit the operand, e.g. RET.  */
   2408       if (optional_operand_p (opcode, idx)
   2409 	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
   2410 	break;
   2411       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   2412 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   2413       snprintf (buf, size, "%s",
   2414 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   2415       break;
   2416 
   2417     case AARCH64_OPND_Rd_SP:
   2418     case AARCH64_OPND_Rn_SP:
   2419       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   2420 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
   2421 	      || opnd->qualifier == AARCH64_OPND_QLF_X
   2422 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
   2423       snprintf (buf, size, "%s",
   2424 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
   2425       break;
   2426 
   2427     case AARCH64_OPND_Rm_EXT:
   2428       kind = opnd->shifter.kind;
   2429       assert (idx == 1 || idx == 2);
   2430       if ((aarch64_stack_pointer_p (opnds)
   2431 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
   2432 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
   2433 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
   2434 	       && kind == AARCH64_MOD_UXTW)
   2435 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
   2436 		  && kind == AARCH64_MOD_UXTX)))
   2437 	{
   2438 	  /* 'LSL' is the preferred form in this case.  */
   2439 	  kind = AARCH64_MOD_LSL;
   2440 	  if (opnd->shifter.amount == 0)
   2441 	    {
   2442 	      /* Shifter omitted.  */
   2443 	      snprintf (buf, size, "%s",
   2444 			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   2445 	      break;
   2446 	    }
   2447 	}
   2448       if (opnd->shifter.amount)
   2449 	snprintf (buf, size, "%s, %s #%d",
   2450 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   2451 		  aarch64_operand_modifiers[kind].name,
   2452 		  opnd->shifter.amount);
   2453       else
   2454 	snprintf (buf, size, "%s, %s",
   2455 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   2456 		  aarch64_operand_modifiers[kind].name);
   2457       break;
   2458 
   2459     case AARCH64_OPND_Rm_SFT:
   2460       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   2461 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   2462       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
   2463 	snprintf (buf, size, "%s",
   2464 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   2465       else
   2466 	snprintf (buf, size, "%s, %s #%d",
   2467 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   2468 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
   2469 		  opnd->shifter.amount);
   2470       break;
   2471 
   2472     case AARCH64_OPND_Fd:
   2473     case AARCH64_OPND_Fn:
   2474     case AARCH64_OPND_Fm:
   2475     case AARCH64_OPND_Fa:
   2476     case AARCH64_OPND_Ft:
   2477     case AARCH64_OPND_Ft2:
   2478     case AARCH64_OPND_Sd:
   2479     case AARCH64_OPND_Sn:
   2480     case AARCH64_OPND_Sm:
   2481       snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
   2482 		opnd->reg.regno);
   2483       break;
   2484 
   2485     case AARCH64_OPND_Vd:
   2486     case AARCH64_OPND_Vn:
   2487     case AARCH64_OPND_Vm:
   2488       snprintf (buf, size, "v%d.%s", opnd->reg.regno,
   2489 		aarch64_get_qualifier_name (opnd->qualifier));
   2490       break;
   2491 
   2492     case AARCH64_OPND_Ed:
   2493     case AARCH64_OPND_En:
   2494     case AARCH64_OPND_Em:
   2495       snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
   2496 		aarch64_get_qualifier_name (opnd->qualifier),
   2497 		opnd->reglane.index);
   2498       break;
   2499 
   2500     case AARCH64_OPND_VdD1:
   2501     case AARCH64_OPND_VnD1:
   2502       snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
   2503       break;
   2504 
   2505     case AARCH64_OPND_LVn:
   2506     case AARCH64_OPND_LVt:
   2507     case AARCH64_OPND_LVt_AL:
   2508     case AARCH64_OPND_LEt:
   2509       print_register_list (buf, size, opnd);
   2510       break;
   2511 
   2512     case AARCH64_OPND_Cn:
   2513     case AARCH64_OPND_Cm:
   2514       snprintf (buf, size, "C%d", opnd->reg.regno);
   2515       break;
   2516 
   2517     case AARCH64_OPND_IDX:
   2518     case AARCH64_OPND_IMM:
   2519     case AARCH64_OPND_WIDTH:
   2520     case AARCH64_OPND_UIMM3_OP1:
   2521     case AARCH64_OPND_UIMM3_OP2:
   2522     case AARCH64_OPND_BIT_NUM:
   2523     case AARCH64_OPND_IMM_VLSL:
   2524     case AARCH64_OPND_IMM_VLSR:
   2525     case AARCH64_OPND_SHLL_IMM:
   2526     case AARCH64_OPND_IMM0:
   2527     case AARCH64_OPND_IMMR:
   2528     case AARCH64_OPND_IMMS:
   2529     case AARCH64_OPND_FBITS:
   2530       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   2531       break;
   2532 
   2533     case AARCH64_OPND_IMM_MOV:
   2534       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   2535 	{
   2536 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
   2537 	    {
   2538 	      int imm32 = opnd->imm.value;
   2539 	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
   2540 	    }
   2541 	  break;
   2542 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
   2543 	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
   2544 		    opnd->imm.value, opnd->imm.value);
   2545 	  break;
   2546 	default: assert (0);
   2547 	}
   2548       break;
   2549 
   2550     case AARCH64_OPND_FPIMM0:
   2551       snprintf (buf, size, "#0.0");
   2552       break;
   2553 
   2554     case AARCH64_OPND_LIMM:
   2555     case AARCH64_OPND_AIMM:
   2556     case AARCH64_OPND_HALF:
   2557       if (opnd->shifter.amount)
   2558 	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
   2559 		  opnd->shifter.amount);
   2560       else
   2561 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
   2562       break;
   2563 
   2564     case AARCH64_OPND_SIMD_IMM:
   2565     case AARCH64_OPND_SIMD_IMM_SFT:
   2566       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
   2567 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
   2568 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
   2569       else
   2570 	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
   2571 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
   2572 		  opnd->shifter.amount);
   2573       break;
   2574 
   2575     case AARCH64_OPND_FPIMM:
   2576     case AARCH64_OPND_SIMD_FPIMM:
   2577       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   2578 	{
   2579 	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
   2580 	    {
   2581 	      half_conv_t c;
   2582 	      c.i = expand_fp_imm (2, opnd->imm.value);
   2583 	      snprintf (buf, size,  "#%.18e", c.f);
   2584 	    }
   2585 	  break;
   2586 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
   2587 	    {
   2588 	      single_conv_t c;
   2589 	      c.i = expand_fp_imm (4, opnd->imm.value);
   2590 	      snprintf (buf, size,  "#%.18e", c.f);
   2591 	    }
   2592 	  break;
   2593 	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
   2594 	    {
   2595 	      double_conv_t c;
   2596 	      c.i = expand_fp_imm (8, opnd->imm.value);
   2597 	      snprintf (buf, size,  "#%.18e", c.d);
   2598 	    }
   2599 	  break;
   2600 	default: assert (0);
   2601 	}
   2602       break;
   2603 
   2604     case AARCH64_OPND_CCMP_IMM:
   2605     case AARCH64_OPND_NZCV:
   2606     case AARCH64_OPND_EXCEPTION:
   2607     case AARCH64_OPND_UIMM4:
   2608     case AARCH64_OPND_UIMM7:
   2609       if (optional_operand_p (opcode, idx) == TRUE
   2610 	  && (opnd->imm.value ==
   2611 	      (int64_t) get_optional_operand_default_value (opcode)))
   2612 	/* Omit the operand, e.g. DCPS1.  */
   2613 	break;
   2614       snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
   2615       break;
   2616 
   2617     case AARCH64_OPND_COND:
   2618     case AARCH64_OPND_COND1:
   2619       snprintf (buf, size, "%s", opnd->cond->names[0]);
   2620       break;
   2621 
   2622     case AARCH64_OPND_ADDR_ADRP:
   2623       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
   2624 	+ opnd->imm.value;
   2625       if (pcrel_p)
   2626 	*pcrel_p = 1;
   2627       if (address)
   2628 	*address = addr;
   2629       /* This is not necessary during disassembly, as print_address_func
   2630 	 in the disassemble_info will take care of the printing.  But some
   2631 	 other callers may still be interested in getting the string in *BUF,
   2632 	 so we do the snprintf regardless.  */
   2633       snprintf (buf, size, "#0x%" PRIx64, addr);
   2634       break;
   2635 
   2636     case AARCH64_OPND_ADDR_PCREL14:
   2637     case AARCH64_OPND_ADDR_PCREL19:
   2638     case AARCH64_OPND_ADDR_PCREL21:
   2639     case AARCH64_OPND_ADDR_PCREL26:
   2640       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
   2641       if (pcrel_p)
   2642 	*pcrel_p = 1;
   2643       if (address)
   2644 	*address = addr;
   2645       /* This is not necessary during disassembly, as print_address_func
   2646 	 in the disassemble_info will take care of the printing.  But some
   2647 	 other callers may still be interested in getting the string in *BUF,
   2648 	 so we do the snprintf regardless.  */
   2649       snprintf (buf, size, "#0x%" PRIx64, addr);
   2650       break;
   2651 
   2652     case AARCH64_OPND_ADDR_SIMPLE:
   2653     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
   2654     case AARCH64_OPND_SIMD_ADDR_POST:
   2655       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   2656       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
   2657 	{
   2658 	  if (opnd->addr.offset.is_reg)
   2659 	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
   2660 	  else
   2661 	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
   2662 	}
   2663       else
   2664 	snprintf (buf, size, "[%s]", name);
   2665       break;
   2666 
   2667     case AARCH64_OPND_ADDR_REGOFF:
   2668       print_register_offset_address (buf, size, opnd);
   2669       break;
   2670 
   2671     case AARCH64_OPND_ADDR_SIMM7:
   2672     case AARCH64_OPND_ADDR_SIMM9:
   2673     case AARCH64_OPND_ADDR_SIMM9_2:
   2674       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   2675       if (opnd->addr.writeback)
   2676 	{
   2677 	  if (opnd->addr.preind)
   2678 	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
   2679 	  else
   2680 	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
   2681 	}
   2682       else
   2683 	{
   2684 	  if (opnd->addr.offset.imm)
   2685 	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
   2686 	  else
   2687 	    snprintf (buf, size, "[%s]", name);
   2688 	}
   2689       break;
   2690 
   2691     case AARCH64_OPND_ADDR_UIMM12:
   2692       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   2693       if (opnd->addr.offset.imm)
   2694 	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
   2695       else
   2696 	snprintf (buf, size, "[%s]", name);
   2697       break;
   2698 
   2699     case AARCH64_OPND_SYSREG:
   2700       for (i = 0; aarch64_sys_regs[i].name; ++i)
   2701 	if (aarch64_sys_regs[i].value == opnd->sysreg
   2702 	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
   2703 	  break;
   2704       if (aarch64_sys_regs[i].name)
   2705 	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
   2706       else
   2707 	{
   2708 	  /* Implementation defined system register.  */
   2709 	  unsigned int value = opnd->sysreg;
   2710 	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
   2711 		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
   2712 		    value & 0x7);
   2713 	}
   2714       break;
   2715 
   2716     case AARCH64_OPND_PSTATEFIELD:
   2717       for (i = 0; aarch64_pstatefields[i].name; ++i)
   2718 	if (aarch64_pstatefields[i].value == opnd->pstatefield)
   2719 	  break;
   2720       assert (aarch64_pstatefields[i].name);
   2721       snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
   2722       break;
   2723 
   2724     case AARCH64_OPND_SYSREG_AT:
   2725     case AARCH64_OPND_SYSREG_DC:
   2726     case AARCH64_OPND_SYSREG_IC:
   2727     case AARCH64_OPND_SYSREG_TLBI:
   2728       snprintf (buf, size, "%s", opnd->sysins_op->name);
   2729       break;
   2730 
   2731     case AARCH64_OPND_BARRIER:
   2732       snprintf (buf, size, "%s", opnd->barrier->name);
   2733       break;
   2734 
   2735     case AARCH64_OPND_BARRIER_ISB:
   2736       /* Operand can be omitted, e.g. in DCPS1.  */
   2737       if (! optional_operand_p (opcode, idx)
   2738 	  || (opnd->barrier->value
   2739 	      != get_optional_operand_default_value (opcode)))
   2740 	snprintf (buf, size, "#0x%x", opnd->barrier->value);
   2741       break;
   2742 
   2743     case AARCH64_OPND_PRFOP:
   2744       if (opnd->prfop->name != NULL)
   2745 	snprintf (buf, size, "%s", opnd->prfop->name);
   2746       else
   2747 	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
   2748       break;
   2749 
   2750     case AARCH64_OPND_BARRIER_PSB:
   2751       snprintf (buf, size, "%s", opnd->hint_option->name);
   2752       break;
   2753 
   2754     default:
   2755       assert (0);
   2756     }
   2757 }
   2758 
   2759 #define CPENC(op0,op1,crn,crm,op2) \
   2761   ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
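/* The result packs op0 into bits [15:14], op1 into [13:11], CRn into [10:7],
   CRm into [6:3] and op2 into [2:0], matching the field extraction done for
   AARCH64_OPND_SYSREG in aarch64_print_operand above.  */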
   2762   /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
   2763 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
   2764   /* for 3.9.10 System Instructions */
   2765 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
   2766 
   2767 #define C0  0
   2768 #define C1  1
   2769 #define C2  2
   2770 #define C3  3
   2771 #define C4  4
   2772 #define C5  5
   2773 #define C6  6
   2774 #define C7  7
   2775 #define C8  8
   2776 #define C9  9
   2777 #define C10 10
   2778 #define C11 11
   2779 #define C12 12
   2780 #define C13 13
   2781 #define C14 14
   2782 #define C15 15
   2783 
   2784 #ifdef F_DEPRECATED
   2785 #undef F_DEPRECATED
   2786 #endif
   2787 #define F_DEPRECATED	0x1	/* Deprecated system register.  */
   2788 
   2789 #ifdef F_ARCHEXT
   2790 #undef F_ARCHEXT
   2791 #endif
   2792 #define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
   2793 
   2794 #ifdef F_HASXT
   2795 #undef F_HASXT
   2796 #endif
   2797 #define F_HASXT		0x4	/* System instruction register <Xt>
   2798 				   operand.  */
   2799 
   2800 
   2801 /* TODO: there are two more issues that need to be resolved:
   2802    1. handle read-only and write-only system registers;
   2803    2. handle cpu-implementation-defined system registers.  */
   2804 const aarch64_sys_reg aarch64_sys_regs [] =
   2805 {
   2806   { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
   2807   { "spsr_el12",	CPEN_ (5, C0, 0), F_ARCHEXT },
   2808   { "elr_el1",          CPEN_(0,C0,1),	0 },
   2809   { "elr_el12",	CPEN_ (5, C0, 1), F_ARCHEXT },
   2810   { "sp_el0",           CPEN_(0,C1,0),	0 },
   2811   { "spsel",            CPEN_(0,C2,0),	0 },
   2812   { "daif",             CPEN_(3,C2,1),	0 },
   2813   { "currentel",        CPEN_(0,C2,2),	0 }, /* RO */
   2814   { "pan",		CPEN_(0,C2,3),	F_ARCHEXT },
   2815   { "uao",		CPEN_ (0, C2, 4), F_ARCHEXT },
   2816   { "nzcv",             CPEN_(3,C2,0),	0 },
   2817   { "fpcr",             CPEN_(3,C4,0),	0 },
   2818   { "fpsr",             CPEN_(3,C4,1),	0 },
   2819   { "dspsr_el0",        CPEN_(3,C5,0),	0 },
   2820   { "dlr_el0",          CPEN_(3,C5,1),	0 },
   2821   { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
   2822   { "elr_el2",          CPEN_(4,C0,1),	0 },
   2823   { "sp_el1",           CPEN_(4,C1,0),	0 },
   2824   { "spsr_irq",         CPEN_(4,C3,0),	0 },
   2825   { "spsr_abt",         CPEN_(4,C3,1),	0 },
   2826   { "spsr_und",         CPEN_(4,C3,2),	0 },
   2827   { "spsr_fiq",         CPEN_(4,C3,3),	0 },
   2828   { "spsr_el3",         CPEN_(6,C0,0),	0 },
   2829   { "elr_el3",          CPEN_(6,C0,1),	0 },
   2830   { "sp_el2",           CPEN_(6,C1,0),	0 },
   2831   { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
   2832   { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
   2833   { "midr_el1",         CPENC(3,0,C0,C0,0),	0 }, /* RO */
   2834   { "ctr_el0",          CPENC(3,3,C0,C0,1),	0 }, /* RO */
   2835   { "mpidr_el1",        CPENC(3,0,C0,C0,5),	0 }, /* RO */
   2836   { "revidr_el1",       CPENC(3,0,C0,C0,6),	0 }, /* RO */
   2837   { "aidr_el1",         CPENC(3,1,C0,C0,7),	0 }, /* RO */
   2838   { "dczid_el0",        CPENC(3,3,C0,C0,7),	0 }, /* RO */
   2839   { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	0 }, /* RO */
   2840   { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	0 }, /* RO */
   2841   { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	0 }, /* RO */
   2842   { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	0 }, /* RO */
   2843   { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	0 }, /* RO */
   2844   { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	0 }, /* RO */
   2845   { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	0 }, /* RO */
   2846   { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	0 }, /* RO */
   2847   { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),	0 }, /* RO */
   2848   { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	0 }, /* RO */
   2849   { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	0 }, /* RO */
   2850   { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	0 }, /* RO */
   2851   { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	0 }, /* RO */
   2852   { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	0 }, /* RO */
   2853   { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	0 }, /* RO */
   2854   { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	0 }, /* RO */
   2855   { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	0 }, /* RO */
   2856   { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	0 }, /* RO */
   2857   { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	0 }, /* RO */
   2858   { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	0 }, /* RO */
   2859   { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	0 }, /* RO */
   2860   { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	0 }, /* RO */
   2861   { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	0 }, /* RO */
   2862   { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	0 }, /* RO */
   2863   { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	0 }, /* RO */
   2864   { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	0 }, /* RO */
   2865   { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	0 }, /* RO */
   2866   { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
   2867   { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	0 }, /* RO */
   2868   { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	0 }, /* RO */
   2869   { "clidr_el1",        CPENC(3,1,C0,C0,1),	0 }, /* RO */
   2870   { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 }, /* RO */
   2871   { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
   2872   { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
   2873   { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
   2874   { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
   2875   { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
   2876   { "sctlr_el12",	CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
   2877   { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
   2878   { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
   2879   { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
   2880   { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
   2881   { "cpacr_el12",	CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
   2882   { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
   2883   { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
   2884   { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
   2885   { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
   2886   { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
   2887   { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
   2888   { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
   2889   { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
   2890   { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
   2891   { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
   2892   { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
   2893   { "ttbr1_el2",	CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
   2894   { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
   2895   { "ttbr0_el12",	CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
   2896   { "ttbr1_el12",	CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
   2897   { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
   2898   { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
   2899   { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
   2900   { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
   2901   { "tcr_el12",		CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
   2902   { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
   2903   { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
   2904   { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
   2905   { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
   2906   { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
   2907   { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
   2908   { "afsr0_el12",	CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
   2909   { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
   2910   { "afsr1_el12",	CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
   2911   { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
   2912   { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
   2913   { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
   2914   { "esr_el12",		CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
   2915   { "vsesr_el2",	CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
   2916   { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
   2917   { "erridr_el1",	CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
   2918   { "errselr_el1",	CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
   2919   { "erxfr_el1",	CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
   2920   { "erxctlr_el1",	CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
   2921   { "erxstatus_el1",	CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
   2922   { "erxaddr_el1",	CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
   2923   { "erxmisc0_el1",	CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
   2924   { "erxmisc1_el1",	CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
   2925   { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
   2926   { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
   2927   { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
   2928   { "far_el12",		CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
   2929   { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
   2930   { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
   2931   { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
   2932   { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
   2933   { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
   2934   { "mair_el12",	CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
   2935   { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
   2936   { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
   2937   { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
   2938   { "amair_el12",	CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
   2939   { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
   2940   { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
   2941   { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
   2942   { "vbar_el12",	CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
   2943   { "rvbar_el1",        CPENC(3,0,C12,C0,1),	0 }, /* RO */
   2944   { "rvbar_el2",        CPENC(3,4,C12,C0,1),	0 }, /* RO */
   2945   { "rvbar_el3",        CPENC(3,6,C12,C0,1),	0 }, /* RO */
   2946   { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
   2947   { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
   2948   { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
   2949   { "isr_el1",          CPENC(3,0,C12,C1,0),	0 }, /* RO */
   2950   { "disr_el1",		CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
   2951   { "vdisr_el2",	CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
   2952   { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
   2953   { "contextidr_el2",	CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
   2954   { "contextidr_el12",	CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
   2955   { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
   2956   { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RO */
   2957   { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
   2958   { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
   2959   { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
   2960   { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
   2961   { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RO */
   2962   { "cntpct_el0",       CPENC(3,3,C14,C0,1),	0 }, /* RO */
   2963   { "cntvct_el0",       CPENC(3,3,C14,C0,2),	0 }, /* RO */
   2964   { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
   2965   { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
   2966   { "cntkctl_el12",	CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
   2967   { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
   2968   { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
   2969   { "cntp_tval_el02",	CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
   2970   { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
   2971   { "cntp_ctl_el02",	CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
   2972   { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
   2973   { "cntp_cval_el02",	CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
   2974   { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
   2975   { "cntv_tval_el02",	CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
   2976   { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
   2977   { "cntv_ctl_el02",	CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
   2978   { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
   2979   { "cntv_cval_el02",	CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
   2980   { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
   2981   { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
   2982   { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
   2983   { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
   2984   { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
   2985   { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
   2986   { "cnthv_tval_el2",	CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
   2987   { "cnthv_ctl_el2",	CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
   2988   { "cnthv_cval_el2",	CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
   2989   { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
   2990   { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
   2991   { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
   2992   { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
   2993   { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
   2994   { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	0 },  /* r */
   2995   { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
   2996   { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
   2997   { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* r */
   2998   { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* w */
   2999   { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },  /* r */
   3000   { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },  /* w */
   3001   { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
   3002   { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
   3003   { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
   3004   { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
   3005   { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
   3006   { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
   3007   { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
   3008   { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
   3009   { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
   3010   { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
   3011   { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
   3012   { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
   3013   { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
   3014   { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
   3015   { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
   3016   { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
   3017   { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
   3018   { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
   3019   { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
   3020   { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
   3021   { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
   3022   { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
   3023   { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
   3024   { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
   3025   { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
   3026   { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
   3027   { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
   3028   { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
   3029   { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
   3030   { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
   3031   { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
   3032   { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
   3033   { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
   3034   { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
   3035   { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
   3036   { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
   3037   { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
   3038   { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
   3039   { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
   3040   { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
   3041   { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
   3042   { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
   3043   { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
   3044   { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
   3045   { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
   3046   { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
   3047   { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
   3048   { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
   3049   { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
   3050   { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
   3051   { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
   3052   { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
   3053   { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
   3054   { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
   3055   { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
   3056   { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
   3057   { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
   3058   { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
   3059   { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
   3060   { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
   3061   { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
   3062   { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
   3063   { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
   3064   { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
   3065   { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
   3066   { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
   3067   { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	0 },  /* r */
   3068   { "oslar_el1",         CPENC(2,0,C1, C0, 4),	0 },  /* w */
   3069   { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	0 },  /* r */
   3070   { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
   3071   { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
   3072   { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
   3073   { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
   3074   { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	0 },  /* r */
   3075   { "pmblimitr_el1",	 CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
   3076   { "pmbptr_el1",	 CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
   3077   { "pmbsr_el1",	 CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
   3078   { "pmbidr_el1",	 CPENC (3, 0, C9, C10, 7), F_ARCHEXT },  /* ro */
   3079   { "pmscr_el1",	 CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
   3080   { "pmsicr_el1",	 CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
   3081   { "pmsirr_el1",	 CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
   3082   { "pmsfcr_el1",	 CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
   3083   { "pmsevfr_el1",	 CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
   3084   { "pmslatfr_el1",	 CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
   3085   { "pmsidr_el1",	 CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* ro */
   3086   { "pmscr_el2",	 CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
   3087   { "pmscr_el12",	 CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
   3088   { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
   3089   { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
   3090   { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
   3091   { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
   3092   { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	0 },  /* w */
   3093   { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
   3094   { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	0 },  /* r */
   3095   { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	0 },  /* r */
   3096   { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
   3097   { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
   3098   { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
   3099   { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
   3100   { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
   3101   { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
   3102   { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
   3103   { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
   3104   { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
   3105   { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
   3106   { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
   3107   { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
   3108   { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
   3109   { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
   3110   { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
   3111   { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
   3112   { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
   3113   { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
   3114   { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
   3115   { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
   3116   { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
   3117   { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
   3118   { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
   3119   { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
   3120   { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
   3121   { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
   3122   { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
   3123   { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
   3124   { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
   3125   { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
   3126   { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
   3127   { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
   3128   { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
   3129   { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
   3130   { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
   3131   { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
   3132   { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
   3133   { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
   3134   { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
   3135   { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
   3136   { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
   3137   { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
   3138   { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
   3139   { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
   3140   { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
   3141   { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
   3142   { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
   3143   { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
   3144   { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
   3145   { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
   3146   { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
   3147   { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
   3148   { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
   3149   { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
   3150   { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
   3151   { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
   3152   { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
   3153   { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
   3154   { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
   3155   { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
   3156   { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
   3157   { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
   3158   { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
   3159   { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
   3160   { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
   3161   { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
   3162   { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
   3163   { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
   3164   { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
   3165   { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
   3166   { 0,          CPENC(0,0,0,0,0),	0 },
   3167 };
   3168 
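         /* Return TRUE if the system register described by REG is flagged as
            deprecated (F_DEPRECATED), so that its use can be diagnosed.  */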
   3169 bfd_boolean
   3170 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
   3171 {
   3172   return (reg->flags & F_DEPRECATED) != 0;
   3173 }
   3174 
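         /* Return TRUE if the system register REG is implemented on a core with
            the feature set FEATURES.  Registers without the F_ARCHEXT flag belong
            to the base architecture and are always reported as supported; the
            checks below gate the remaining registers on the architecture
            extension that introduced them.

            A minimal, illustrative calling pattern (the loop and the hypothetical
            handle_reg consumer are only a sketch, not a prescribed API; the name,
            value and flags members follow the aarch64_sys_regs table above):

              const aarch64_sys_reg *r;
              for (r = aarch64_sys_regs; r->name != NULL; r++)
                if (aarch64_sys_reg_supported_p (features, r)
                    && !aarch64_sys_reg_deprecated_p (r))
                  handle_reg (r->name, r->value);  */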
   3175 bfd_boolean
   3176 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
   3177 			     const aarch64_sys_reg *reg)
   3178 {
   3179   if (!(reg->flags & F_ARCHEXT))
   3180     return TRUE;
   3181 
   3182   /* PAN.  Values are from aarch64_sys_regs.  */
   3183   if (reg->value == CPEN_(0,C2,3)
   3184       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
   3185     return FALSE;
   3186 
   3187   /* Virtualization host extensions: system registers.  */
   3188   if ((reg->value == CPENC (3, 4, C2, C0, 1)
   3189        || reg->value == CPENC (3, 4, C13, C0, 1)
   3190        || reg->value == CPENC (3, 4, C14, C3, 0)
   3191        || reg->value == CPENC (3, 4, C14, C3, 1)
   3192        || reg->value == CPENC (3, 4, C14, C3, 2))
   3193       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
   3194       return FALSE;
   3195 
   3196   /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
   3197   if ((reg->value == CPEN_ (5, C0, 0)
   3198        || reg->value == CPEN_ (5, C0, 1)
   3199        || reg->value == CPENC (3, 5, C1, C0, 0)
   3200        || reg->value == CPENC (3, 5, C1, C0, 2)
   3201        || reg->value == CPENC (3, 5, C2, C0, 0)
   3202        || reg->value == CPENC (3, 5, C2, C0, 1)
   3203        || reg->value == CPENC (3, 5, C2, C0, 2)
   3204        || reg->value == CPENC (3, 5, C5, C1, 0)
   3205        || reg->value == CPENC (3, 5, C5, C1, 1)
   3206        || reg->value == CPENC (3, 5, C5, C2, 0)
   3207        || reg->value == CPENC (3, 5, C6, C0, 0)
   3208        || reg->value == CPENC (3, 5, C10, C2, 0)
   3209        || reg->value == CPENC (3, 5, C10, C3, 0)
   3210        || reg->value == CPENC (3, 5, C12, C0, 0)
   3211        || reg->value == CPENC (3, 5, C13, C0, 1)
   3212        || reg->value == CPENC (3, 5, C14, C1, 0))
   3213       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
   3214     return FALSE;
   3215 
   3216   /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
   3217   if ((reg->value == CPENC (3, 5, C14, C2, 0)
   3218        || reg->value == CPENC (3, 5, C14, C2, 1)
   3219        || reg->value == CPENC (3, 5, C14, C2, 2)
   3220        || reg->value == CPENC (3, 5, C14, C3, 0)
   3221        || reg->value == CPENC (3, 5, C14, C3, 1)
   3222        || reg->value == CPENC (3, 5, C14, C3, 2))
   3223       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
   3224     return FALSE;
   3225 
   3226   /* ARMv8.2 features.  */
   3227 
   3228   /* ID_AA64MMFR2_EL1.  */
   3229   if (reg->value == CPENC (3, 0, C0, C7, 2)
   3230       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
   3231     return FALSE;
   3232 
   3233   /* PSTATE.UAO.  */
   3234   if (reg->value == CPEN_ (0, C2, 4)
   3235       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
   3236     return FALSE;
   3237 
   3238   /* RAS extension.  */
   3239 
    3240   /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
    3241      ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
   3242   if ((reg->value == CPENC (3, 0, C5, C3, 0)
   3243        || reg->value == CPENC (3, 0, C5, C3, 1)
   3244        || reg->value == CPENC (3, 0, C5, C3, 2)
   3245        || reg->value == CPENC (3, 0, C5, C3, 3)
   3246        || reg->value == CPENC (3, 0, C5, C4, 0)
   3247        || reg->value == CPENC (3, 0, C5, C4, 1)
   3248        || reg->value == CPENC (3, 0, C5, C4, 2)
   3249        || reg->value == CPENC (3, 0, C5, C4, 3)
   3250        || reg->value == CPENC (3, 0, C5, C5, 0)
   3251        || reg->value == CPENC (3, 0, C5, C5, 1))
   3252       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
   3253     return FALSE;
   3254 
   3255   /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
   3256   if ((reg->value == CPENC (3, 4, C5, C2, 3)
   3257        || reg->value == CPENC (3, 0, C12, C1, 1)
   3258        || reg->value == CPENC (3, 4, C12, C1, 1))
   3259       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
   3260     return FALSE;
   3261 
   3262   /* Statistical Profiling extension.  */
   3263   if ((reg->value == CPENC (3, 0, C9, C10, 0)
   3264        || reg->value == CPENC (3, 0, C9, C10, 1)
   3265        || reg->value == CPENC (3, 0, C9, C10, 3)
   3266        || reg->value == CPENC (3, 0, C9, C10, 7)
   3267        || reg->value == CPENC (3, 0, C9, C9, 0)
   3268        || reg->value == CPENC (3, 0, C9, C9, 2)
   3269        || reg->value == CPENC (3, 0, C9, C9, 3)
   3270        || reg->value == CPENC (3, 0, C9, C9, 4)
   3271        || reg->value == CPENC (3, 0, C9, C9, 5)
   3272        || reg->value == CPENC (3, 0, C9, C9, 6)
   3273        || reg->value == CPENC (3, 0, C9, C9, 7)
   3274        || reg->value == CPENC (3, 4, C9, C9, 0)
   3275        || reg->value == CPENC (3, 5, C9, C9, 0))
   3276       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
   3277     return FALSE;
   3278 
   3279   return TRUE;
   3280 }
   3281 
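         /* PSTATE fields accepted by the immediate form of MSR.  Entries flagged
            F_ARCHEXT are only valid when the corresponding architecture extension
            is implemented; see aarch64_pstatefield_supported_p below.  */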
   3282 const aarch64_sys_reg aarch64_pstatefields [] =
   3283 {
   3284   { "spsel",            0x05,	0 },
   3285   { "daifset",          0x1e,	0 },
   3286   { "daifclr",          0x1f,	0 },
   3287   { "pan",		0x04,	F_ARCHEXT },
   3288   { "uao",		0x03,	F_ARCHEXT },
   3289   { 0,          CPENC(0,0,0,0,0), 0 },
   3290 };
   3291 
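         /* Return TRUE if the PSTATE field REG (an entry of aarch64_pstatefields)
            can be used on a core with the feature set FEATURES.  */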
   3292 bfd_boolean
   3293 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
   3294 				 const aarch64_sys_reg *reg)
   3295 {
   3296   if (!(reg->flags & F_ARCHEXT))
   3297     return TRUE;
   3298 
   3299   /* PAN.  Values are from aarch64_pstatefields.  */
   3300   if (reg->value == 0x04
   3301       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
   3302     return FALSE;
   3303 
   3304   /* UAO.  Values are from aarch64_pstatefields.  */
   3305   if (reg->value == 0x03
   3306       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
   3307     return FALSE;
   3308 
   3309   return TRUE;
   3310 }
   3311 
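         /* Operands of the IC (instruction cache maintenance) instruction.
            F_HASXT marks operations that take an address in a general purpose
            register Xt.  */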
   3312 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
   3313 {
   3314     { "ialluis", CPENS(0,C7,C1,0), 0 },
   3315     { "iallu",   CPENS(0,C7,C5,0), 0 },
   3316     { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
   3317     { 0, CPENS(0,0,0,0), 0 }
   3318 };
   3319 
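         /* Operands of the DC (data cache maintenance) instruction.  */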
   3320 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
   3321 {
   3322     { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
   3323     { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
   3324     { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
   3325     { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
   3326     { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
   3327     { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
   3328     { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
   3329     { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
   3330     { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
   3331     { 0,       CPENS(0,0,0,0), 0 }
   3332 };
   3333 
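         /* Operands of the AT (address translation) instruction.  */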
   3334 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
   3335 {
   3336     { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
   3337     { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
   3338     { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
   3339     { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
   3340     { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
   3341     { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
   3342     { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
   3343     { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
   3344     { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
   3345     { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
   3346     { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
   3347     { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
   3348     { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
   3349     { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
   3350     { 0,       CPENS(0,0,0,0), 0 }
   3351 };
   3352 
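         /* Operands of the TLBI (TLB invalidation) instruction.  */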
   3353 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
   3354 {
   3355     { "vmalle1",   CPENS(0,C8,C7,0), 0 },
   3356     { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
   3357     { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
   3358     { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
   3359     { "vmalle1is", CPENS(0,C8,C3,0), 0 },
   3360     { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
   3361     { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
   3362     { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
   3363     { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
   3364     { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
   3365     { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
   3366     { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
   3367     { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
   3368     { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
   3369     { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
   3370     { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
   3371     { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
   3372     { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
   3373     { "alle2",     CPENS(4,C8,C7,0), 0 },
   3374     { "alle2is",   CPENS(4,C8,C3,0), 0 },
   3375     { "alle1",     CPENS(4,C8,C7,4), 0 },
   3376     { "alle1is",   CPENS(4,C8,C3,4), 0 },
   3377     { "alle3",     CPENS(6,C8,C7,0), 0 },
   3378     { "alle3is",   CPENS(6,C8,C3,0), 0 },
   3379     { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
   3380     { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
   3381     { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
   3382     { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
   3383     { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
   3384     { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
   3385     { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
   3386     { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
   3387     { 0,       CPENS(0,0,0,0), 0 }
   3388 };
   3389 
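         /* Return TRUE if the system instruction operand SYS_INS_REG takes a
            general purpose register Xt, i.e. it has the F_HASXT flag set.  */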
   3390 bfd_boolean
   3391 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
   3392 {
   3393   return (sys_ins_reg->flags & F_HASXT) != 0;
   3394 }
   3395 
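         /* Return TRUE if the system instruction operand REG is available on a
            core with the feature set FEATURES; operands without F_ARCHEXT are
            always supported.  */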
   3396 extern bfd_boolean
   3397 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
   3398 				 const aarch64_sys_ins_reg *reg)
   3399 {
   3400   if (!(reg->flags & F_ARCHEXT))
   3401     return TRUE;
   3402 
   3403   /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
   3404   if (reg->value == CPENS (3, C7, C12, 1)
   3405       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
   3406     return FALSE;
   3407 
   3408   /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
   3409   if ((reg->value == CPENS (0, C7, C9, 0)
   3410        || reg->value == CPENS (0, C7, C9, 1))
   3411       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
   3412     return FALSE;
   3413 
   3414   return TRUE;
   3415 }
   3416 
   3417 #undef C0
   3418 #undef C1
   3419 #undef C2
   3420 #undef C3
   3421 #undef C4
   3422 #undef C5
   3423 #undef C6
   3424 #undef C7
   3425 #undef C8
   3426 #undef C9
   3427 #undef C10
   3428 #undef C11
   3429 #undef C12
   3430 #undef C13
   3431 #undef C14
   3432 #undef C15
   3433 
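         /* Extract bit BT, or the bit-field [HI:LO], from the instruction word
            INSN.  For example, BITS (insn, 14, 10) in verify_ldpsw below yields
            the five-bit Rt2 field of an LDPSW encoding.  */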
   3434 #define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
   3435 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
   3436 
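         /* Check the register operands of an LDPSW instruction word INSN: return
            FALSE for the writeback forms in which the base register Rn equals
            either transfer register (unless Rn is register 31), and for loads in
            which the two transfer registers are equal.  */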
   3437 static bfd_boolean
   3438 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
   3439 	      const aarch64_insn insn)
   3440 {
   3441   int t  = BITS (insn, 4, 0);
   3442   int n  = BITS (insn, 9, 5);
   3443   int t2 = BITS (insn, 14, 10);
   3444 
   3445   if (BIT (insn, 23))
   3446     {
   3447       /* Write back enabled.  */
   3448       if ((t == n || t2 == n) && n != 31)
   3449 	return FALSE;
   3450     }
   3451 
   3452   if (BIT (insn, 22))
   3453     {
    3454       /* Load: the two transfer registers must be distinct.  */
   3455       if (t == t2)
   3456 	return FALSE;
   3457     }
   3458 
   3459   return TRUE;
   3460 }
   3461 
   3462 /* Include the opcode description table as well as the operand description
   3463    table.  */
   3464 #define VERIFIER(x) verify_##x
   3465 #include "aarch64-tbl.h"
   3466