      1 /* aarch64-opc.c -- AArch64 opcode support.
      2    Copyright (C) 2009-2014 Free Software Foundation, Inc.
      3    Contributed by ARM Ltd.
      4 
      5    This file is part of the GNU opcodes library.
      6 
      7    This library is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3, or (at your option)
     10    any later version.
     11 
     12    It is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program; see the file COPYING3. If not,
     19    see <http://www.gnu.org/licenses/>.  */
     20 
     21 #include "sysdep.h"
     22 #include <assert.h>
     23 #include <stdlib.h>
     24 #include <stdio.h>
     25 #include <stdint.h>
     26 #include <stdarg.h>
     27 #include <inttypes.h>
     28 
     29 #include "opintl.h"
     30 
     31 #include "aarch64-opc.h"
     32 
     33 #ifdef DEBUG_AARCH64
     34 int debug_dump = FALSE;
     35 #endif /* DEBUG_AARCH64 */
     36 
      37 /* Helper functions to determine which operand is used to encode/decode
     38    the size:Q fields for AdvSIMD instructions.  */
     39 
     40 static inline bfd_boolean
     41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
     42 {
     43   return ((qualifier >= AARCH64_OPND_QLF_V_8B
     44 	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
     45 	  : FALSE);
     46 }
     47 
     48 static inline bfd_boolean
     49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
     50 {
     51   return ((qualifier >= AARCH64_OPND_QLF_S_B
     52 	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
     53 	  : FALSE);
     54 }
     55 
     56 enum data_pattern
     57 {
     58   DP_UNKNOWN,
     59   DP_VECTOR_3SAME,
     60   DP_VECTOR_LONG,
     61   DP_VECTOR_WIDE,
     62   DP_VECTOR_ACROSS_LANES,
     63 };
     64 
     65 static const char significant_operand_index [] =
     66 {
     67   0,	/* DP_UNKNOWN, by default using operand 0.  */
     68   0,	/* DP_VECTOR_3SAME */
     69   1,	/* DP_VECTOR_LONG */
     70   2,	/* DP_VECTOR_WIDE */
     71   1,	/* DP_VECTOR_ACROSS_LANES */
     72 };
     73 
     74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
     75    the data pattern.
     76    N.B. QUALIFIERS is a possible sequence of qualifiers each of which
     77    corresponds to one of a sequence of operands.  */
     78 
     79 static enum data_pattern
     80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
     81 {
     82   if (vector_qualifier_p (qualifiers[0]) == TRUE)
     83     {
     84       /* e.g. v.4s, v.4s, v.4s
     85 	   or v.4h, v.4h, v.h[3].  */
     86       if (qualifiers[0] == qualifiers[1]
     87 	  && vector_qualifier_p (qualifiers[2]) == TRUE
     88 	  && (aarch64_get_qualifier_esize (qualifiers[0])
     89 	      == aarch64_get_qualifier_esize (qualifiers[1]))
     90 	  && (aarch64_get_qualifier_esize (qualifiers[0])
     91 	      == aarch64_get_qualifier_esize (qualifiers[2])))
     92 	return DP_VECTOR_3SAME;
     93       /* e.g. v.8h, v.8b, v.8b.
     94            or v.4s, v.4h, v.h[2].
     95 	   or v.8h, v.16b.  */
     96       if (vector_qualifier_p (qualifiers[1]) == TRUE
     97 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
     98 	  && (aarch64_get_qualifier_esize (qualifiers[0])
     99 	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
    100 	return DP_VECTOR_LONG;
    101       /* e.g. v.8h, v.8h, v.8b.  */
    102       if (qualifiers[0] == qualifiers[1]
    103 	  && vector_qualifier_p (qualifiers[2]) == TRUE
    104 	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
    105 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    106 	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
    107 	  && (aarch64_get_qualifier_esize (qualifiers[0])
    108 	      == aarch64_get_qualifier_esize (qualifiers[1])))
    109 	return DP_VECTOR_WIDE;
    110     }
    111   else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    112     {
    113       /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
    114       if (vector_qualifier_p (qualifiers[1]) == TRUE
    115 	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
    116 	return DP_VECTOR_ACROSS_LANES;
    117     }
    118 
    119   return DP_UNKNOWN;
    120 }
    121 
    122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
    123    the AdvSIMD instructions.  */
    124 /* N.B. it is possible to do some optimization that doesn't call
     125    get_data_pattern each time we need to select an operand.  We can
     126    either buffer the calculated result or statically generate the data;
    127    however, it is not obvious that the optimization will bring significant
    128    benefit.  */
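         /* For example, for a widening instruction such as "saddl v0.8h, v1.8b, v2.8b",
            get_data_pattern returns DP_VECTOR_LONG, so the function below selects
            operand index 1 to carry the size:Q information.  */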
    129 
    130 int
    131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
    132 {
    133   return
    134     significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
    135 }
    136 
    137 const aarch64_field fields[] =
    139 {
    140     {  0,  0 },	/* NIL.  */
     141     {  0,  4 },	/* cond2: condition in truly conditionally executed inst.  */
    142     {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    143     {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    144     { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    145     {  5, 19 },	/* imm19: e.g. in CBZ.  */
    146     {  5, 19 },	/* immhi: e.g. in ADRP.  */
    147     { 29,  2 },	/* immlo: e.g. in ADRP.  */
    148     { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    149     { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    150     { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    151     { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    152     {  0,  5 },	/* Rt: in load/store instructions.  */
    153     {  0,  5 },	/* Rd: in many integer instructions.  */
    154     {  5,  5 },	/* Rn: in many integer instructions.  */
    155     { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    156     { 10,  5 },	/* Ra: in fp instructions.  */
    157     {  5,  3 },	/* op2: in the system instructions.  */
    158     {  8,  4 },	/* CRm: in the system instructions.  */
    159     { 12,  4 },	/* CRn: in the system instructions.  */
    160     { 16,  3 },	/* op1: in the system instructions.  */
    161     { 19,  2 },	/* op0: in the system instructions.  */
    162     { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    163     { 12,  4 },	/* cond: condition flags as a source operand.  */
    164     { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    165     { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    166     { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    167     { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    168     { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    169     { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    170     { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    171     { 12,  1 },	/* S: in load/store reg offset instructions.  */
    172     { 21,  2 },	/* hw: in move wide constant instructions.  */
    173     { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    174     { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    175     { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    176     { 22,  2 },	/* type: floating point type field in fp data inst.  */
    177     { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    178     { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    179     { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    180     { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    181     { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    182     { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    183     { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    184     { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    185     {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    186     {  5, 16 },	/* imm16: in exception instructions.  */
    187     {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    188     { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    189     { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    190     { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    191     { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    192     { 22,  1 },	/* N: in logical (immediate) instructions.  */
    193     { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    194     { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    195     { 31,  1 },	/* sf: in integer data processing instructions.  */
    196     { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    197     { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    198     { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    199     { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    200     { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    201     { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    202     { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    203 };
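         /* Each entry above is an aarch64_field of the form { lsb, width } (see the
            aarch64_field definition in aarch64-opc.h); e.g. the imm19 entry { 5, 19 }
            describes a 19-bit field occupying bits 23:5 of the instruction word.  */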
    204 
    205 enum aarch64_operand_class
    206 aarch64_get_operand_class (enum aarch64_opnd type)
    207 {
    208   return aarch64_operands[type].op_class;
    209 }
    210 
    211 const char *
    212 aarch64_get_operand_name (enum aarch64_opnd type)
    213 {
    214   return aarch64_operands[type].name;
    215 }
    216 
    217 /* Get operand description string.
     218    This is usually for diagnostic purposes.  */
    219 const char *
    220 aarch64_get_operand_desc (enum aarch64_opnd type)
    221 {
    222   return aarch64_operands[type].desc;
    223 }
    224 
    225 /* Table of all conditional affixes.  */
    226 const aarch64_cond aarch64_conds[16] =
    227 {
    228   {{"eq"}, 0x0},
    229   {{"ne"}, 0x1},
    230   {{"cs", "hs"}, 0x2},
    231   {{"cc", "lo", "ul"}, 0x3},
    232   {{"mi"}, 0x4},
    233   {{"pl"}, 0x5},
    234   {{"vs"}, 0x6},
    235   {{"vc"}, 0x7},
    236   {{"hi"}, 0x8},
    237   {{"ls"}, 0x9},
    238   {{"ge"}, 0xa},
    239   {{"lt"}, 0xb},
    240   {{"gt"}, 0xc},
    241   {{"le"}, 0xd},
    242   {{"al"}, 0xe},
    243   {{"nv"}, 0xf},
    244 };
    245 
    246 const aarch64_cond *
    247 get_cond_from_value (aarch64_insn value)
    248 {
    249   assert (value < 16);
    250   return &aarch64_conds[(unsigned int) value];
    251 }
    252 
    253 const aarch64_cond *
    254 get_inverted_cond (const aarch64_cond *cond)
    255 {
    256   return &aarch64_conds[cond->value ^ 0x1];
    257 }
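         /* For example, flipping the lowest bit of the condition value maps EQ (0x0)
            to NE (0x1) and GE (0xa) to LT (0xb), per the aarch64_conds table above.  */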
    258 
    259 /* Table describing the operand extension/shifting operators; indexed by
    260    enum aarch64_modifier_kind.
    261 
    262    The value column provides the most common values for encoding modifiers,
    263    which enables table-driven encoding/decoding for the modifiers.  */
    264 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
    265 {
    266     {"none", 0x0},
    267     {"msl",  0x0},
    268     {"ror",  0x3},
    269     {"asr",  0x2},
    270     {"lsr",  0x1},
    271     {"lsl",  0x0},
    272     {"uxtb", 0x0},
    273     {"uxth", 0x1},
    274     {"uxtw", 0x2},
    275     {"uxtx", 0x3},
    276     {"sxtb", 0x4},
    277     {"sxth", 0x5},
    278     {"sxtw", 0x6},
    279     {"sxtx", 0x7},
    280     {NULL, 0},
    281 };
    282 
    283 enum aarch64_modifier_kind
    284 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
    285 {
    286   return desc - aarch64_operand_modifiers;
    287 }
    288 
    289 aarch64_insn
    290 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
    291 {
    292   return aarch64_operand_modifiers[kind].value;
    293 }
    294 
    295 enum aarch64_modifier_kind
    296 aarch64_get_operand_modifier_from_value (aarch64_insn value,
    297 					 bfd_boolean extend_p)
    298 {
    299   if (extend_p == TRUE)
    300     return AARCH64_MOD_UXTB + value;
    301   else
    302     return AARCH64_MOD_LSL - value;
    303 }
    304 
    305 bfd_boolean
    306 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
    307 {
    308   return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
    309     ? TRUE : FALSE;
    310 }
    311 
    312 static inline bfd_boolean
    313 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
    314 {
    315   return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
    316     ? TRUE : FALSE;
    317 }
    318 
    319 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
    320 {
    321     { "#0x00", 0x0 },
    322     { "oshld", 0x1 },
    323     { "oshst", 0x2 },
    324     { "osh",   0x3 },
    325     { "#0x04", 0x4 },
    326     { "nshld", 0x5 },
    327     { "nshst", 0x6 },
    328     { "nsh",   0x7 },
    329     { "#0x08", 0x8 },
    330     { "ishld", 0x9 },
    331     { "ishst", 0xa },
    332     { "ish",   0xb },
    333     { "#0x0c", 0xc },
    334     { "ld",    0xd },
    335     { "st",    0xe },
    336     { "sy",    0xf },
    337 };
    338 
    339 /* op -> op:       load = 0 instruction = 1 store = 2
    340    l  -> level:    1-3
    341    t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
    342 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
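         /* For example, "pstl2strm" is B (2, 2, 1) = (2 << 3) | ((2 - 1) << 1) | 1
            = 0x13, i.e. index 0x13 in the table below.  */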
    343 const struct aarch64_name_value_pair aarch64_prfops[32] =
    344 {
    345   { "pldl1keep", B(0, 1, 0) },
    346   { "pldl1strm", B(0, 1, 1) },
    347   { "pldl2keep", B(0, 2, 0) },
    348   { "pldl2strm", B(0, 2, 1) },
    349   { "pldl3keep", B(0, 3, 0) },
    350   { "pldl3strm", B(0, 3, 1) },
    351   { NULL, 0x06 },
    352   { NULL, 0x07 },
    353   { "plil1keep", B(1, 1, 0) },
    354   { "plil1strm", B(1, 1, 1) },
    355   { "plil2keep", B(1, 2, 0) },
    356   { "plil2strm", B(1, 2, 1) },
    357   { "plil3keep", B(1, 3, 0) },
    358   { "plil3strm", B(1, 3, 1) },
    359   { NULL, 0x0e },
    360   { NULL, 0x0f },
    361   { "pstl1keep", B(2, 1, 0) },
    362   { "pstl1strm", B(2, 1, 1) },
    363   { "pstl2keep", B(2, 2, 0) },
    364   { "pstl2strm", B(2, 2, 1) },
    365   { "pstl3keep", B(2, 3, 0) },
    366   { "pstl3strm", B(2, 3, 1) },
    367   { NULL, 0x16 },
    368   { NULL, 0x17 },
    369   { NULL, 0x18 },
    370   { NULL, 0x19 },
    371   { NULL, 0x1a },
    372   { NULL, 0x1b },
    373   { NULL, 0x1c },
    374   { NULL, 0x1d },
    375   { NULL, 0x1e },
    376   { NULL, 0x1f },
    377 };
    378 #undef B
    379 
     380 /* Utilities for value constraints.  */
    382 
    383 static inline int
    384 value_in_range_p (int64_t value, int low, int high)
    385 {
    386   return (value >= low && value <= high) ? 1 : 0;
    387 }
    388 
    389 static inline int
    390 value_aligned_p (int64_t value, int align)
    391 {
    392   return ((value & (align - 1)) == 0) ? 1 : 0;
    393 }
    394 
     395 /* Return non-zero if a signed VALUE fits in a field of WIDTH bits.  */
    396 static inline int
    397 value_fit_signed_field_p (int64_t value, unsigned width)
    398 {
    399   assert (width < 32);
    400   if (width < sizeof (value) * 8)
    401     {
    402       int64_t lim = (int64_t)1 << (width - 1);
    403       if (value >= -lim && value < lim)
    404 	return 1;
    405     }
    406   return 0;
    407 }
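         /* E.g. for WIDTH == 19 (a 19-bit signed field), lim is 1 << 18 == 262144,
            so values from -262144 to 262143 inclusive fit.  */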
    408 
     409 /* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits.  */
    410 static inline int
    411 value_fit_unsigned_field_p (int64_t value, unsigned width)
    412 {
    413   assert (width < 32);
    414   if (width < sizeof (value) * 8)
    415     {
    416       int64_t lim = (int64_t)1 << width;
    417       if (value >= 0 && value < lim)
    418 	return 1;
    419     }
    420   return 0;
    421 }
    422 
    423 /* Return 1 if OPERAND is SP or WSP.  */
    424 int
    425 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
    426 {
    427   return ((aarch64_get_operand_class (operand->type)
    428 	   == AARCH64_OPND_CLASS_INT_REG)
    429 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
    430 	  && operand->reg.regno == 31);
    431 }
    432 
     433 /* Return 1 if OPERAND is XZR or WZR.  */
    434 int
    435 aarch64_zero_register_p (const aarch64_opnd_info *operand)
    436 {
    437   return ((aarch64_get_operand_class (operand->type)
    438 	   == AARCH64_OPND_CLASS_INT_REG)
    439 	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
    440 	  && operand->reg.regno == 31);
    441 }
    442 
     443 /* Return true if the operand *OPERAND, which has the operand code
     444    OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
    445    qualified by the qualifier TARGET.  */
    446 
    447 static inline int
    448 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
    449 			  aarch64_opnd_qualifier_t target)
    450 {
    451   switch (operand->qualifier)
    452     {
    453     case AARCH64_OPND_QLF_W:
    454       if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
    455 	return 1;
    456       break;
    457     case AARCH64_OPND_QLF_X:
    458       if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
    459 	return 1;
    460       break;
    461     case AARCH64_OPND_QLF_WSP:
    462       if (target == AARCH64_OPND_QLF_W
    463 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    464 	return 1;
    465       break;
    466     case AARCH64_OPND_QLF_SP:
    467       if (target == AARCH64_OPND_QLF_X
    468 	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    469 	return 1;
    470       break;
    471     default:
    472       break;
    473     }
    474 
    475   return 0;
    476 }
    477 
    478 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
    479    for operand KNOWN_IDX, return the expected qualifier for operand IDX.
    480 
     481    Return NIL if more than one expected qualifier is found.  */
    482 
    483 aarch64_opnd_qualifier_t
    484 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
    485 				int idx,
    486 				const aarch64_opnd_qualifier_t known_qlf,
    487 				int known_idx)
    488 {
    489   int i, saved_i;
    490 
    491   /* Special case.
    492 
    493      When the known qualifier is NIL, we have to assume that there is only
    494      one qualifier sequence in the *QSEQ_LIST and return the corresponding
    495      qualifier directly.  One scenario is that for instruction
    496 	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
    497      which has only one possible valid qualifier sequence
    498 	NIL, S_D
    499      the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
    500      determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
    501 
    502      Because the qualifier NIL has dual roles in the qualifier sequence:
     503      it can mean no qualifier for the operand, or the qualifier sequence is
    504      not in use (when all qualifiers in the sequence are NILs), we have to
    505      handle this special case here.  */
    506   if (known_qlf == AARCH64_OPND_NIL)
    507     {
    508       assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
    509       return qseq_list[0][idx];
    510     }
    511 
    512   for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    513     {
    514       if (qseq_list[i][known_idx] == known_qlf)
    515 	{
    516 	  if (saved_i != -1)
     517 	    /* More than one sequence is found to have KNOWN_QLF at
    518 	       KNOWN_IDX.  */
    519 	    return AARCH64_OPND_NIL;
    520 	  saved_i = i;
    521 	}
    522     }
    523 
    524   return qseq_list[saved_i][idx];
    525 }
    526 
    527 enum operand_qualifier_kind
    528 {
    529   OQK_NIL,
    530   OQK_OPD_VARIANT,
    531   OQK_VALUE_IN_RANGE,
    532   OQK_MISC,
    533 };
    534 
    535 /* Operand qualifier description.  */
    536 struct operand_qualifier_data
    537 {
    538   /* The usage of the three data fields depends on the qualifier kind.  */
    539   int data0;
    540   int data1;
    541   int data2;
    542   /* Description.  */
    543   const char *desc;
    544   /* Kind.  */
    545   enum operand_qualifier_kind kind;
    546 };
    547 
    548 /* Indexed by the operand qualifier enumerators.  */
    549 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
    550 {
    551   {0, 0, 0, "NIL", OQK_NIL},
    552 
    553   /* Operand variant qualifiers.
    554      First 3 fields:
    555      element size, number of elements and common value for encoding.  */
    556 
    557   {4, 1, 0x0, "w", OQK_OPD_VARIANT},
    558   {8, 1, 0x1, "x", OQK_OPD_VARIANT},
    559   {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
    560   {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
    561 
    562   {1, 1, 0x0, "b", OQK_OPD_VARIANT},
    563   {2, 1, 0x1, "h", OQK_OPD_VARIANT},
    564   {4, 1, 0x2, "s", OQK_OPD_VARIANT},
    565   {8, 1, 0x3, "d", OQK_OPD_VARIANT},
    566   {16, 1, 0x4, "q", OQK_OPD_VARIANT},
    567 
    568   {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
    569   {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
    570   {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
    571   {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
    572   {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
    573   {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
    574   {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
    575   {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
    576   {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
    577 
    578   /* Qualifiers constraining the value range.
    579      First 3 fields:
    580      Lower bound, higher bound, unused.  */
    581 
    582   {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
    583   {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
    584   {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
    585   {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
    586   {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
    587   {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
    588 
     589   /* Qualifiers for miscellaneous purposes.
    590      First 3 fields:
    591      unused, unused and unused.  */
    592 
    593   {0, 0, 0, "lsl", 0},
    594   {0, 0, 0, "msl", 0},
    595 
    596   {0, 0, 0, "retrieving", 0},
    597 };
    598 
    599 static inline bfd_boolean
    600 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
    601 {
    602   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
    603     ? TRUE : FALSE;
    604 }
    605 
    606 static inline bfd_boolean
    607 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
    608 {
    609   return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
    610     ? TRUE : FALSE;
    611 }
    612 
    613 const char*
    614 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
    615 {
    616   return aarch64_opnd_qualifiers[qualifier].desc;
    617 }
    618 
    619 /* Given an operand qualifier, return the expected data element size
    620    of a qualified operand.  */
    621 unsigned char
    622 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
    623 {
    624   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    625   return aarch64_opnd_qualifiers[qualifier].data0;
    626 }
    627 
    628 unsigned char
    629 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
    630 {
    631   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    632   return aarch64_opnd_qualifiers[qualifier].data1;
    633 }
    634 
    635 aarch64_insn
    636 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
    637 {
    638   assert (operand_variant_qualifier_p (qualifier) == TRUE);
    639   return aarch64_opnd_qualifiers[qualifier].data2;
    640 }
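         /* For example, for the "4s" arrangement (the {4, 4, 0x5, "4s"} entry above,
            i.e. AARCH64_OPND_QLF_V_4S), the three accessors above return an element
            size of 4 bytes, 4 elements and the standard encoding value 0x5.  */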
    641 
    642 static int
    643 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
    644 {
    645   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
    646   return aarch64_opnd_qualifiers[qualifier].data0;
    647 }
    648 
    649 static int
    650 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
    651 {
    652   assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
    653   return aarch64_opnd_qualifiers[qualifier].data1;
    654 }
    655 
    656 #ifdef DEBUG_AARCH64
    657 void
    658 aarch64_verbose (const char *str, ...)
    659 {
    660   va_list ap;
    661   va_start (ap, str);
    662   printf ("#### ");
    663   vprintf (str, ap);
    664   printf ("\n");
    665   va_end (ap);
    666 }
    667 
    668 static inline void
    669 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
    670 {
    671   int i;
    672   printf ("#### \t");
    673   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    674     printf ("%s,", aarch64_get_qualifier_name (*qualifier));
    675   printf ("\n");
    676 }
    677 
    678 static void
    679 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
    680 		       const aarch64_opnd_qualifier_t *qualifier)
    681 {
    682   int i;
    683   aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
    684 
    685   aarch64_verbose ("dump_match_qualifiers:");
    686   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    687     curr[i] = opnd[i].qualifier;
    688   dump_qualifier_sequence (curr);
    689   aarch64_verbose ("against");
    690   dump_qualifier_sequence (qualifier);
    691 }
    692 #endif /* DEBUG_AARCH64 */
    693 
     694 /* TODO: improve this; we could have an extra field at run time to
    695    store the number of operands rather than calculating it every time.  */
    696 
    697 int
    698 aarch64_num_of_operands (const aarch64_opcode *opcode)
    699 {
    700   int i = 0;
    701   const enum aarch64_opnd *opnds = opcode->operands;
    702   while (opnds[i++] != AARCH64_OPND_NIL)
    703     ;
    704   --i;
    705   assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
    706   return i;
    707 }
    708 
    709 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
     710    If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
    711 
     712    N.B. on entry, it is very likely that only some operands in *INST
     713    have had their qualifiers established.
    714 
    715    If STOP_AT is not -1, the function will only try to match
    716    the qualifier sequence for operands before and including the operand
    717    of index STOP_AT; and on success *RET will only be filled with the first
    718    (STOP_AT+1) qualifiers.
    719 
     720    A couple of examples of the matching algorithm:
    721 
    722    X,W,NIL should match
    723    X,W,NIL
    724 
    725    NIL,NIL should match
    726    X  ,NIL
    727 
    728    Apart from serving the main encoding routine, this can also be called
    729    during or after the operand decoding.  */
    730 
    731 int
    732 aarch64_find_best_match (const aarch64_inst *inst,
    733 			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
    734 			 int stop_at, aarch64_opnd_qualifier_t *ret)
    735 {
    736   int found = 0;
    737   int i, num_opnds;
    738   const aarch64_opnd_qualifier_t *qualifiers;
    739 
    740   num_opnds = aarch64_num_of_operands (inst->opcode);
    741   if (num_opnds == 0)
    742     {
    743       DEBUG_TRACE ("SUCCEED: no operand");
    744       return 1;
    745     }
    746 
    747   if (stop_at < 0 || stop_at >= num_opnds)
    748     stop_at = num_opnds - 1;
    749 
    750   /* For each pattern.  */
    751   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    752     {
    753       int j;
    754       qualifiers = *qualifiers_list;
    755 
    756       /* Start as positive.  */
    757       found = 1;
    758 
    759       DEBUG_TRACE ("%d", i);
    760 #ifdef DEBUG_AARCH64
    761       if (debug_dump)
    762 	dump_match_qualifiers (inst->operands, qualifiers);
    763 #endif
    764 
     765       /* Most opcodes have far fewer patterns in the list.
     766 	 The first NIL qualifier indicates the end of the list.   */
    767       if (empty_qualifier_sequence_p (qualifiers) == TRUE)
    768 	{
    769 	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
    770 	  if (i)
    771 	    found = 0;
    772 	  break;
    773 	}
    774 
    775       for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
    776 	{
    777 	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
    778 	    {
     779 	      /* Either the operand does not have a qualifier, or the qualifier
    780 		 for the operand needs to be deduced from the qualifier
    781 		 sequence.
     782 		 In the latter case, any constraint checking related to
    783 		 the obtained qualifier should be done later in
    784 		 operand_general_constraint_met_p.  */
    785 	      continue;
    786 	    }
    787 	  else if (*qualifiers != inst->operands[j].qualifier)
    788 	    {
    789 	      /* Unless the target qualifier can also qualify the operand
    790 		 (which has already had a non-nil qualifier), non-equal
     791 		 qualifiers are generally unmatched.  */
    792 	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
    793 		continue;
    794 	      else
    795 		{
    796 		  found = 0;
    797 		  break;
    798 		}
    799 	    }
    800 	  else
    801 	    continue;	/* Equal qualifiers are certainly matched.  */
    802 	}
    803 
    804       /* Qualifiers established.  */
    805       if (found == 1)
    806 	break;
    807     }
    808 
    809   if (found == 1)
    810     {
    811       /* Fill the result in *RET.  */
    812       int j;
    813       qualifiers = *qualifiers_list;
    814 
    815       DEBUG_TRACE ("complete qualifiers using list %d", i);
    816 #ifdef DEBUG_AARCH64
    817       if (debug_dump)
    818 	dump_qualifier_sequence (qualifiers);
    819 #endif
    820 
    821       for (j = 0; j <= stop_at; ++j, ++qualifiers)
    822 	ret[j] = *qualifiers;
    823       for (; j < AARCH64_MAX_OPND_NUM; ++j)
    824 	ret[j] = AARCH64_OPND_QLF_NIL;
    825 
    826       DEBUG_TRACE ("SUCCESS");
    827       return 1;
    828     }
    829 
    830   DEBUG_TRACE ("FAIL");
    831   return 0;
    832 }
    833 
    834 /* Operand qualifier matching and resolving.
    835 
    836    Return 1 if the operand qualifier(s) in *INST match one of the qualifier
    837    sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
    838 
     839    If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
    840    succeeds.  */
    841 
    842 static int
    843 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
    844 {
    845   int i;
    846   aarch64_opnd_qualifier_seq_t qualifiers;
    847 
    848   if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
    849 			       qualifiers))
    850     {
    851       DEBUG_TRACE ("matching FAIL");
    852       return 0;
    853     }
    854 
    855   /* Update the qualifiers.  */
    856   if (update_p == TRUE)
    857     for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    858       {
    859 	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
    860 	  break;
    861 	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
    862 			"update %s with %s for operand %d",
    863 			aarch64_get_qualifier_name (inst->operands[i].qualifier),
    864 			aarch64_get_qualifier_name (qualifiers[i]), i);
    865 	inst->operands[i].qualifier = qualifiers[i];
    866       }
    867 
    868   DEBUG_TRACE ("matching SUCCESS");
    869   return 1;
    870 }
    871 
    872 /* Return TRUE if VALUE is a wide constant that can be moved into a general
    873    register by MOVZ.
    874 
     875    IS32 indicates whether VALUE is a 32-bit immediate or not.
     876    If SHIFT_AMOUNT is not NULL, then when TRUE is returned, the logical left
     877    shift amount will be returned in *SHIFT_AMOUNT.  */
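         /* For example, aarch64_wide_constant_p (0x12340000, 0, &shift) returns TRUE
            with *SHIFT_AMOUNT set to 16, matching e.g. "movz x0, #0x1234, lsl #16".  */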
    878 
    879 bfd_boolean
    880 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
    881 {
    882   int amount;
    883 
    884   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
    885 
    886   if (is32)
    887     {
    888       /* Allow all zeros or all ones in top 32-bits, so that
    889 	 32-bit constant expressions like ~0x80000000 are
    890 	 permitted.  */
    891       uint64_t ext = value;
    892       if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
    893 	/* Immediate out of range.  */
    894 	return FALSE;
    895       value &= (int64_t) 0xffffffff;
    896     }
    897 
    898   /* first, try movz then movn */
    899   amount = -1;
    900   if ((value & ((int64_t) 0xffff << 0)) == value)
    901     amount = 0;
    902   else if ((value & ((int64_t) 0xffff << 16)) == value)
    903     amount = 16;
    904   else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    905     amount = 32;
    906   else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    907     amount = 48;
    908 
    909   if (amount == -1)
    910     {
    911       DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
    912       return FALSE;
    913     }
    914 
    915   if (shift_amount != NULL)
    916     *shift_amount = amount;
    917 
    918   DEBUG_TRACE ("exit TRUE with amount %d", amount);
    919 
    920   return TRUE;
    921 }
    922 
    923 /* Build the accepted values for immediate logical SIMD instructions.
    924 
    925    The standard encodings of the immediate value are:
    926      N      imms     immr         SIMD size  R             S
    927      1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
    928      0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
    929      0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
    930      0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
    931      0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
    932      0      11110s   00000r       2       UInt(r)       UInt(s)
     933    where the all-ones value of S is reserved.
    934 
    935    Let's call E the SIMD size.
    936 
    937    The immediate value is: S+1 bits '1' rotated to the right by R.
    938 
    939    The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
    940    (remember S != E - 1).  */
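         /* Writing the sum out in full: 64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
            = 4032 + 992 + 240 + 56 + 12 + 2 = 5334.  */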
    941 
    942 #define TOTAL_IMM_NB  5334
    943 
    944 typedef struct
    945 {
    946   uint64_t imm;
    947   aarch64_insn encoding;
    948 } simd_imm_encoding;
    949 
    950 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
    951 
    952 static int
    953 simd_imm_encoding_cmp(const void *i1, const void *i2)
    954 {
    955   const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
    956   const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
    957 
    958   if (imm1->imm < imm2->imm)
    959     return -1;
    960   if (imm1->imm > imm2->imm)
    961     return +1;
    962   return 0;
    963 }
    964 
    965 /* immediate bitfield standard encoding
    966    imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
    967    1         ssssss     rrrrrr      64        rrrrrr ssssss
    968    0         0sssss     0rrrrr      32        rrrrr  sssss
    969    0         10ssss     00rrrr      16        rrrr   ssss
    970    0         110sss     000rrr      8         rrr    sss
    971    0         1110ss     0000rr      4         rr     ss
    972    0         11110s     00000r      2         r      s  */
    973 static inline int
    974 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
    975 {
    976   return (is64 << 12) | (r << 6) | s;
    977 }
    978 
    979 static void
    980 build_immediate_table (void)
    981 {
    982   uint32_t log_e, e, s, r, s_mask;
    983   uint64_t mask, imm;
    984   int nb_imms;
    985   int is64;
    986 
    987   nb_imms = 0;
    988   for (log_e = 1; log_e <= 6; log_e++)
    989     {
    990       /* Get element size.  */
    991       e = 1u << log_e;
    992       if (log_e == 6)
    993 	{
    994 	  is64 = 1;
    995 	  mask = 0xffffffffffffffffull;
    996 	  s_mask = 0;
    997 	}
    998       else
    999 	{
   1000 	  is64 = 0;
   1001 	  mask = (1ull << e) - 1;
   1002 	  /* log_e  s_mask
   1003 	     1     ((1 << 4) - 1) << 2 = 111100
   1004 	     2     ((1 << 3) - 1) << 3 = 111000
   1005 	     3     ((1 << 2) - 1) << 4 = 110000
   1006 	     4     ((1 << 1) - 1) << 5 = 100000
   1007 	     5     ((1 << 0) - 1) << 6 = 000000  */
   1008 	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
   1009 	}
   1010       for (s = 0; s < e - 1; s++)
   1011 	for (r = 0; r < e; r++)
   1012 	  {
   1013 	    /* s+1 consecutive bits to 1 (s < 63) */
   1014 	    imm = (1ull << (s + 1)) - 1;
   1015 	    /* rotate right by r */
   1016 	    if (r != 0)
   1017 	      imm = (imm >> r) | ((imm << (e - r)) & mask);
    1018 	    /* Replicate the constant depending on SIMD size; the cases below intentionally fall through.  */
   1019 	    switch (log_e)
   1020 	      {
   1021 	      case 1: imm = (imm <<  2) | imm;
   1022 	      case 2: imm = (imm <<  4) | imm;
   1023 	      case 3: imm = (imm <<  8) | imm;
   1024 	      case 4: imm = (imm << 16) | imm;
   1025 	      case 5: imm = (imm << 32) | imm;
   1026 	      case 6: break;
   1027 	      default: abort ();
   1028 	      }
   1029 	    simd_immediates[nb_imms].imm = imm;
   1030 	    simd_immediates[nb_imms].encoding =
   1031 	      encode_immediate_bitfield(is64, s | s_mask, r);
   1032 	    nb_imms++;
   1033 	  }
   1034     }
   1035   assert (nb_imms == TOTAL_IMM_NB);
   1036   qsort(simd_immediates, nb_imms,
   1037 	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
   1038 }
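         /* A worked example of the construction above: for log_e == 1 (element size 2),
            s == 0 and r == 0, the element is 0b01; the fall-through replication yields
            the immediate 0x5555555555555555, stored with the encoding
            encode_immediate_bitfield (0, 0x3c, 0) == 0x03c, i.e. N=0, immr=000000,
            imms=111100.  */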
   1039 
   1040 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   1041    be accepted by logical (immediate) instructions
   1042    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
   1043 
   1044    IS32 indicates whether or not VALUE is a 32-bit immediate.
   1045    If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   1046    VALUE will be returned in *ENCODING.  */
   1047 
   1048 bfd_boolean
   1049 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
   1050 {
   1051   simd_imm_encoding imm_enc;
   1052   const simd_imm_encoding *imm_encoding;
   1053   static bfd_boolean initialized = FALSE;
   1054 
   1055   DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
   1056 	       value, is32);
   1057 
   1058   if (initialized == FALSE)
   1059     {
   1060       build_immediate_table ();
   1061       initialized = TRUE;
   1062     }
   1063 
   1064   if (is32)
   1065     {
   1066       /* Allow all zeros or all ones in top 32-bits, so that
   1067 	 constant expressions like ~1 are permitted.  */
   1068       if (value >> 32 != 0 && value >> 32 != 0xffffffff)
   1069 	return FALSE;
   1070 
   1071       /* Replicate the 32 lower bits to the 32 upper bits.  */
   1072       value &= 0xffffffff;
   1073       value |= value << 32;
   1074     }
   1075 
   1076   imm_enc.imm = value;
   1077   imm_encoding = (const simd_imm_encoding *)
   1078     bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
   1079             sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
   1080   if (imm_encoding == NULL)
   1081     {
   1082       DEBUG_TRACE ("exit with FALSE");
   1083       return FALSE;
   1084     }
   1085   if (encoding != NULL)
   1086     *encoding = imm_encoding->encoding;
   1087   DEBUG_TRACE ("exit with TRUE");
   1088   return TRUE;
   1089 }
   1090 
   1091 /* If 64-bit immediate IMM is in the format of
   1092    "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   1093    where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   1094    of value "abcdefgh".  Otherwise return -1.  */
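         /* For example, 0x00ff00ff0000ff00 shrinks to 0x52 (0b01010010), while any
            input containing a byte other than 0x00 or 0xff yields -1.  */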
   1095 int
   1096 aarch64_shrink_expanded_imm8 (uint64_t imm)
   1097 {
   1098   int i, ret;
   1099   uint32_t byte;
   1100 
   1101   ret = 0;
   1102   for (i = 0; i < 8; i++)
   1103     {
   1104       byte = (imm >> (8 * i)) & 0xff;
   1105       if (byte == 0xff)
   1106 	ret |= 1 << i;
   1107       else if (byte != 0x00)
   1108 	return -1;
   1109     }
   1110   return ret;
   1111 }
   1112 
   1113 /* Utility inline functions for operand_general_constraint_met_p.  */
   1114 
   1115 static inline void
   1116 set_error (aarch64_operand_error *mismatch_detail,
   1117 	   enum aarch64_operand_error_kind kind, int idx,
   1118 	   const char* error)
   1119 {
   1120   if (mismatch_detail == NULL)
   1121     return;
   1122   mismatch_detail->kind = kind;
   1123   mismatch_detail->index = idx;
   1124   mismatch_detail->error = error;
   1125 }
   1126 
   1127 static inline void
   1128 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
   1129 		  const char* error)
   1130 {
   1131   if (mismatch_detail == NULL)
   1132     return;
   1133   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
   1134 }
   1135 
   1136 static inline void
   1137 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1138 			int idx, int lower_bound, int upper_bound,
   1139 			const char* error)
   1140 {
   1141   if (mismatch_detail == NULL)
   1142     return;
   1143   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
   1144   mismatch_detail->data[0] = lower_bound;
   1145   mismatch_detail->data[1] = upper_bound;
   1146 }
   1147 
   1148 static inline void
   1149 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1150 			    int idx, int lower_bound, int upper_bound)
   1151 {
   1152   if (mismatch_detail == NULL)
   1153     return;
   1154   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1155 			  _("immediate value"));
   1156 }
   1157 
   1158 static inline void
   1159 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1160 			       int idx, int lower_bound, int upper_bound)
   1161 {
   1162   if (mismatch_detail == NULL)
   1163     return;
   1164   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1165 			  _("immediate offset"));
   1166 }
   1167 
   1168 static inline void
   1169 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1170 			      int idx, int lower_bound, int upper_bound)
   1171 {
   1172   if (mismatch_detail == NULL)
   1173     return;
   1174   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1175 			  _("register number"));
   1176 }
   1177 
   1178 static inline void
   1179 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1180 				 int idx, int lower_bound, int upper_bound)
   1181 {
   1182   if (mismatch_detail == NULL)
   1183     return;
   1184   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1185 			  _("register element index"));
   1186 }
   1187 
   1188 static inline void
   1189 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
   1190 				   int idx, int lower_bound, int upper_bound)
   1191 {
   1192   if (mismatch_detail == NULL)
   1193     return;
   1194   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
   1195 			  _("shift amount"));
   1196 }
   1197 
   1198 static inline void
   1199 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
   1200 		     int alignment)
   1201 {
   1202   if (mismatch_detail == NULL)
   1203     return;
   1204   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
   1205   mismatch_detail->data[0] = alignment;
   1206 }
   1207 
   1208 static inline void
   1209 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
   1210 		    int expected_num)
   1211 {
   1212   if (mismatch_detail == NULL)
   1213     return;
   1214   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
   1215   mismatch_detail->data[0] = expected_num;
   1216 }
   1217 
   1218 static inline void
   1219 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
   1220 		 const char* error)
   1221 {
   1222   if (mismatch_detail == NULL)
   1223     return;
   1224   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
   1225 }
   1226 
   1227 /* General constraint checking based on operand code.
   1228 
   1229    Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   1230    as the IDXth operand of opcode OPCODE.  Otherwise return 0.
   1231 
   1232    This function has to be called after the qualifiers for all operands
   1233    have been resolved.
   1234 
    1235    A mismatch error message is returned in *MISMATCH_DETAIL upon request,
   1236    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
    1237    of error messages during disassembly, where error messages are not
   1238    wanted.  We avoid the dynamic construction of strings of error messages
   1239    here (i.e. in libopcodes), as it is costly and complicated; instead, we
   1240    use a combination of error code, static string and some integer data to
   1241    represent an error.  */
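         /* For example, an out-of-range immediate is reported with kind
            AARCH64_OPDE_OUT_OF_RANGE, the static string "immediate value" and the
            lower/upper bounds stored in data[0]/data[1]; see
            set_imm_out_of_range_error above.  */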
   1242 
   1243 static int
   1244 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
   1245 				  enum aarch64_opnd type,
   1246 				  const aarch64_opcode *opcode,
   1247 				  aarch64_operand_error *mismatch_detail)
   1248 {
   1249   unsigned num;
   1250   unsigned char size;
   1251   int64_t imm;
   1252   const aarch64_opnd_info *opnd = opnds + idx;
   1253   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
   1254 
   1255   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
   1256 
   1257   switch (aarch64_operands[type].op_class)
   1258     {
   1259     case AARCH64_OPND_CLASS_INT_REG:
   1260       /* Check pair reg constraints for cas* instructions.  */
   1261       if (type == AARCH64_OPND_PAIRREG)
   1262 	{
   1263 	  assert (idx == 1 || idx == 3);
   1264 	  if (opnds[idx - 1].reg.regno % 2 != 0)
   1265 	    {
   1266 	      set_syntax_error (mismatch_detail, idx - 1,
   1267 				_("reg pair must start from even reg"));
   1268 	      return 0;
   1269 	    }
   1270 	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
   1271 	    {
   1272 	      set_syntax_error (mismatch_detail, idx,
   1273 				_("reg pair must be contiguous"));
   1274 	      return 0;
   1275 	    }
   1276 	  break;
   1277 	}
   1278 
   1279       /* <Xt> may be optional in some IC and TLBI instructions.  */
   1280       if (type == AARCH64_OPND_Rt_SYS)
   1281 	{
   1282 	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
   1283 			       == AARCH64_OPND_CLASS_SYSTEM));
   1284 	  if (opnds[1].present && !opnds[0].sysins_op->has_xt)
   1285 	    {
   1286 	      set_other_error (mismatch_detail, idx, _("extraneous register"));
   1287 	      return 0;
   1288 	    }
   1289 	  if (!opnds[1].present && opnds[0].sysins_op->has_xt)
   1290 	    {
   1291 	      set_other_error (mismatch_detail, idx, _("missing register"));
   1292 	      return 0;
   1293 	    }
   1294 	}
   1295       switch (qualifier)
   1296 	{
   1297 	case AARCH64_OPND_QLF_WSP:
   1298 	case AARCH64_OPND_QLF_SP:
   1299 	  if (!aarch64_stack_pointer_p (opnd))
   1300 	    {
   1301 	      set_other_error (mismatch_detail, idx,
   1302 			       _("stack pointer register expected"));
   1303 	      return 0;
   1304 	    }
   1305 	  break;
   1306 	default:
   1307 	  break;
   1308 	}
   1309       break;
   1310 
   1311     case AARCH64_OPND_CLASS_COND:
   1312       if (type == AARCH64_OPND_COND1
   1313 	  && (opnds[idx].cond->value & 0xe) == 0xe)
   1314 	{
    1315 	  /* Do not allow AL or NV.  */
   1316 	  set_syntax_error (mismatch_detail, idx, NULL);
   1317 	}
   1318       break;
   1319 
   1320     case AARCH64_OPND_CLASS_ADDRESS:
   1321       /* Check writeback.  */
   1322       switch (opcode->iclass)
   1323 	{
   1324 	case ldst_pos:
   1325 	case ldst_unscaled:
   1326 	case ldstnapair_offs:
   1327 	case ldstpair_off:
   1328 	case ldst_unpriv:
   1329 	  if (opnd->addr.writeback == 1)
   1330 	    {
   1331 	      set_syntax_error (mismatch_detail, idx,
   1332 				_("unexpected address writeback"));
   1333 	      return 0;
   1334 	    }
   1335 	  break;
   1336 	case ldst_imm9:
   1337 	case ldstpair_indexed:
   1338 	case asisdlsep:
   1339 	case asisdlsop:
   1340 	  if (opnd->addr.writeback == 0)
   1341 	    {
   1342 	      set_syntax_error (mismatch_detail, idx,
   1343 				_("address writeback expected"));
   1344 	      return 0;
   1345 	    }
   1346 	  break;
   1347 	default:
   1348 	  assert (opnd->addr.writeback == 0);
   1349 	  break;
   1350 	}
   1351       switch (type)
   1352 	{
   1353 	case AARCH64_OPND_ADDR_SIMM7:
    1354 	  /* Scaled signed 7-bit immediate offset.  */
   1355 	  /* Get the size of the data element that is accessed, which may be
   1356 	     different from that of the source register size,
   1357 	     e.g. in strb/ldrb.  */
   1358 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1359 	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
   1360 	    {
   1361 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1362 					     -64 * size, 63 * size);
   1363 	      return 0;
   1364 	    }
   1365 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   1366 	    {
   1367 	      set_unaligned_error (mismatch_detail, idx, size);
   1368 	      return 0;
   1369 	    }
   1370 	  break;
   1371 	case AARCH64_OPND_ADDR_SIMM9:
    1372 	  /* Unscaled signed 9-bit immediate offset.  */
   1373 	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
   1374 	    {
   1375 	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
   1376 	      return 0;
   1377 	    }
   1378 	  break;
   1379 
   1380 	case AARCH64_OPND_ADDR_SIMM9_2:
    1381 	  /* Unscaled signed 9-bit immediate offset, which has to be negative
   1382 	     or unaligned.  */
   1383 	  size = aarch64_get_qualifier_esize (qualifier);
   1384 	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
   1385 	       && !value_aligned_p (opnd->addr.offset.imm, size))
   1386 	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
   1387 	    return 1;
   1388 	  set_other_error (mismatch_detail, idx,
   1389 			   _("negative or unaligned offset expected"));
   1390 	  return 0;
   1391 
   1392 	case AARCH64_OPND_SIMD_ADDR_POST:
   1393 	  /* AdvSIMD load/store multiple structures, post-index.  */
   1394 	  assert (idx == 1);
   1395 	  if (opnd->addr.offset.is_reg)
   1396 	    {
   1397 	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
   1398 		return 1;
   1399 	      else
   1400 		{
   1401 		  set_other_error (mismatch_detail, idx,
   1402 				   _("invalid register offset"));
   1403 		  return 0;
   1404 		}
   1405 	    }
   1406 	  else
   1407 	    {
   1408 	      const aarch64_opnd_info *prev = &opnds[idx-1];
   1409 	      unsigned num_bytes; /* total number of bytes transferred.  */
   1410 	      /* The opcode dependent area stores the number of elements in
   1411 		 each structure to be loaded/stored.  */
   1412 	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
   1413 	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
    1414 		/* Special handling of loading a single structure to all lanes.  */
   1415 		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
   1416 		  * aarch64_get_qualifier_esize (prev->qualifier);
   1417 	      else
   1418 		num_bytes = prev->reglist.num_regs
   1419 		  * aarch64_get_qualifier_esize (prev->qualifier)
   1420 		  * aarch64_get_qualifier_nelem (prev->qualifier);
   1421 	      if ((int) num_bytes != opnd->addr.offset.imm)
   1422 		{
   1423 		  set_other_error (mismatch_detail, idx,
   1424 				   _("invalid post-increment amount"));
   1425 		  return 0;
   1426 		}
   1427 	    }
   1428 	  break;
   1429 
   1430 	case AARCH64_OPND_ADDR_REGOFF:
   1431 	  /* Get the size of the data element that is accessed, which may be
   1432 	     different from that of the source register size,
   1433 	     e.g. in strb/ldrb.  */
   1434 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
   1435 	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
   1436 	  if (opnd->shifter.amount != 0
   1437 	      && opnd->shifter.amount != (int)get_logsz (size))
   1438 	    {
   1439 	      set_other_error (mismatch_detail, idx,
   1440 			       _("invalid shift amount"));
   1441 	      return 0;
   1442 	    }
   1443 	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
   1444 	     operators.  */
   1445 	  switch (opnd->shifter.kind)
   1446 	    {
   1447 	    case AARCH64_MOD_UXTW:
   1448 	    case AARCH64_MOD_LSL:
   1449 	    case AARCH64_MOD_SXTW:
   1450 	    case AARCH64_MOD_SXTX: break;
   1451 	    default:
   1452 	      set_other_error (mismatch_detail, idx,
   1453 			       _("invalid extend/shift operator"));
   1454 	      return 0;
   1455 	    }
   1456 	  break;
   1457 
   1458 	case AARCH64_OPND_ADDR_UIMM12:
   1459 	  imm = opnd->addr.offset.imm;
   1460 	  /* Get the size of the data element that is accessed, which may be
   1461 	     different from that of the source register size,
   1462 	     e.g. in strb/ldrb.  */
   1463 	  size = aarch64_get_qualifier_esize (qualifier);
   1464 	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
   1465 	    {
   1466 	      set_offset_out_of_range_error (mismatch_detail, idx,
   1467 					     0, 4095 * size);
   1468 	      return 0;
   1469 	    }
   1470 	  if (!value_aligned_p (opnd->addr.offset.imm, size))
   1471 	    {
   1472 	      set_unaligned_error (mismatch_detail, idx, size);
   1473 	      return 0;
   1474 	    }
   1475 	  break;
   1476 
   1477 	case AARCH64_OPND_ADDR_PCREL14:
   1478 	case AARCH64_OPND_ADDR_PCREL19:
   1479 	case AARCH64_OPND_ADDR_PCREL21:
   1480 	case AARCH64_OPND_ADDR_PCREL26:
   1481 	  imm = opnd->imm.value;
   1482 	  if (operand_need_shift_by_two (get_operand_from_code (type)))
   1483 	    {
    1484 	      /* The offset value in a PC-relative branch instruction is always
   1485 		 4-byte aligned and is encoded without the lowest 2 bits.  */
   1486 	      if (!value_aligned_p (imm, 4))
   1487 		{
   1488 		  set_unaligned_error (mismatch_detail, idx, 4);
   1489 		  return 0;
   1490 		}
   1491 	      /* Right shift by 2 so that we can carry out the following check
   1492 		 canonically.  */
   1493 	      imm >>= 2;
   1494 	    }
   1495 	  size = get_operand_fields_width (get_operand_from_code (type));
   1496 	  if (!value_fit_signed_field_p (imm, size))
   1497 	    {
   1498 	      set_other_error (mismatch_detail, idx,
   1499 			       _("immediate out of range"));
   1500 	      return 0;
   1501 	    }
   1502 	  break;
   1503 
   1504 	default:
   1505 	  break;
   1506 	}
   1507       break;
   1508 
   1509     case AARCH64_OPND_CLASS_SIMD_REGLIST:
   1510       /* The opcode dependent area stores the number of elements in
   1511 	 each structure to be loaded/stored.  */
   1512       num = get_opcode_dependent_value (opcode);
   1513       switch (type)
   1514 	{
   1515 	case AARCH64_OPND_LVt:
   1516 	  assert (num >= 1 && num <= 4);
    1517 	  /* Except for LD1/ST1, the number of registers should be equal to that
   1518 	     of the structure elements.  */
   1519 	  if (num != 1 && opnd->reglist.num_regs != num)
   1520 	    {
   1521 	      set_reg_list_error (mismatch_detail, idx, num);
   1522 	      return 0;
   1523 	    }
   1524 	  break;
   1525 	case AARCH64_OPND_LVt_AL:
   1526 	case AARCH64_OPND_LEt:
   1527 	  assert (num >= 1 && num <= 4);
   1528 	  /* The number of registers should be equal to that of the structure
   1529 	     elements.  */
   1530 	  if (opnd->reglist.num_regs != num)
   1531 	    {
   1532 	      set_reg_list_error (mismatch_detail, idx, num);
   1533 	      return 0;
   1534 	    }
   1535 	  break;
   1536 	default:
   1537 	  break;
   1538 	}
   1539       break;
   1540 
   1541     case AARCH64_OPND_CLASS_IMMEDIATE:
   1542       /* Constraint check on immediate operand.  */
   1543       imm = opnd->imm.value;
   1544       /* E.g. imm_0_31 constrains value to be 0..31.  */
   1545       if (qualifier_value_in_range_constraint_p (qualifier)
   1546 	  && !value_in_range_p (imm, get_lower_bound (qualifier),
   1547 				get_upper_bound (qualifier)))
   1548 	{
   1549 	  set_imm_out_of_range_error (mismatch_detail, idx,
   1550 				      get_lower_bound (qualifier),
   1551 				      get_upper_bound (qualifier));
   1552 	  return 0;
   1553 	}
   1554 
   1555       switch (type)
   1556 	{
   1557 	case AARCH64_OPND_AIMM:
   1558 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   1559 	    {
   1560 	      set_other_error (mismatch_detail, idx,
   1561 			       _("invalid shift operator"));
   1562 	      return 0;
   1563 	    }
   1564 	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
   1565 	    {
   1566 	      set_other_error (mismatch_detail, idx,
   1567 			       _("shift amount expected to be 0 or 12"));
   1568 	      return 0;
   1569 	    }
   1570 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
   1571 	    {
   1572 	      set_other_error (mismatch_detail, idx,
   1573 			       _("immediate out of range"));
   1574 	      return 0;
   1575 	    }
   1576 	  break;
   1577 
   1578 	case AARCH64_OPND_HALF:
   1579 	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
   1580 	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
   1581 	    {
   1582 	      set_other_error (mismatch_detail, idx,
   1583 			       _("invalid shift operator"));
   1584 	      return 0;
   1585 	    }
   1586 	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   1587 	  if (!value_aligned_p (opnd->shifter.amount, 16))
   1588 	    {
   1589 	      set_other_error (mismatch_detail, idx,
   1590 			       _("shift amount should be a multiple of 16"));
   1591 	      return 0;
   1592 	    }
   1593 	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
   1594 	    {
   1595 	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
   1596 						 0, size * 8 - 16);
   1597 	      return 0;
   1598 	    }
   1599 	  if (opnd->imm.value < 0)
   1600 	    {
   1601 	      set_other_error (mismatch_detail, idx,
   1602 			       _("negative immediate value not allowed"));
   1603 	      return 0;
   1604 	    }
   1605 	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
   1606 	    {
   1607 	      set_other_error (mismatch_detail, idx,
   1608 			       _("immediate out of range"));
   1609 	      return 0;
   1610 	    }
   1611 	  break;
   1612 
   1613 	case AARCH64_OPND_IMM_MOV:
   1614 	    {
   1615 	      int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
   1616 	      imm = opnd->imm.value;
   1617 	      assert (idx == 1);
   1618 	      switch (opcode->op)
   1619 		{
   1620 		case OP_MOV_IMM_WIDEN:
   1621 		  imm = ~imm;
   1622 		  /* Fall through...  */
   1623 		case OP_MOV_IMM_WIDE:
   1624 		  if (!aarch64_wide_constant_p (imm, is32, NULL))
   1625 		    {
   1626 		      set_other_error (mismatch_detail, idx,
   1627 				       _("immediate out of range"));
   1628 		      return 0;
   1629 		    }
   1630 		  break;
   1631 		case OP_MOV_IMM_LOG:
   1632 		  if (!aarch64_logical_immediate_p (imm, is32, NULL))
   1633 		    {
   1634 		      set_other_error (mismatch_detail, idx,
   1635 				       _("immediate out of range"));
   1636 		      return 0;
   1637 		    }
   1638 		  break;
   1639 		default:
   1640 		  assert (0);
   1641 		  return 0;
   1642 		}
   1643 	    }
   1644 	  break;
   1645 
   1646 	case AARCH64_OPND_NZCV:
   1647 	case AARCH64_OPND_CCMP_IMM:
   1648 	case AARCH64_OPND_EXCEPTION:
   1649 	case AARCH64_OPND_UIMM4:
   1650 	case AARCH64_OPND_UIMM7:
   1651 	case AARCH64_OPND_UIMM3_OP1:
   1652 	case AARCH64_OPND_UIMM3_OP2:
   1653 	  size = get_operand_fields_width (get_operand_from_code (type));
   1654 	  assert (size < 32);
   1655 	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
   1656 	    {
   1657 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   1658 					  (1 << size) - 1);
   1659 	      return 0;
   1660 	    }
   1661 	  break;
   1662 
   1663 	case AARCH64_OPND_WIDTH:
   1664 	  assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
   1665 		  && opnds[0].type == AARCH64_OPND_Rd);
   1666 	  size = get_upper_bound (qualifier);
   1667 	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
   1668 	    /* lsb+width <= reg.size  */
   1669 	    {
   1670 	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
   1671 					  size - opnds[idx-1].imm.value);
   1672 	      return 0;
   1673 	    }
   1674 	  break;
   1675 
   1676 	case AARCH64_OPND_LIMM:
   1677 	    {
   1678 	      int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
   1679 	      uint64_t uimm = opnd->imm.value;
   1680 	      if (opcode->op == OP_BIC)
   1681 		uimm = ~uimm;
   1682 	      if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
   1683 		{
   1684 		  set_other_error (mismatch_detail, idx,
   1685 				   _("immediate out of range"));
   1686 		  return 0;
   1687 		}
   1688 	    }
   1689 	  break;
   1690 
   1691 	case AARCH64_OPND_IMM0:
   1692 	case AARCH64_OPND_FPIMM0:
   1693 	  if (opnd->imm.value != 0)
   1694 	    {
   1695 	      set_other_error (mismatch_detail, idx,
   1696 			       _("immediate zero expected"));
   1697 	      return 0;
   1698 	    }
   1699 	  break;
   1700 
   1701 	case AARCH64_OPND_SHLL_IMM:
   1702 	  assert (idx == 2);
   1703 	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
   1704 	  if (opnd->imm.value != size)
   1705 	    {
   1706 	      set_other_error (mismatch_detail, idx,
   1707 			       _("invalid shift amount"));
   1708 	      return 0;
   1709 	    }
   1710 	  break;
   1711 
   1712 	case AARCH64_OPND_IMM_VLSL:
   1713 	  size = aarch64_get_qualifier_esize (qualifier);
   1714 	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
   1715 	    {
   1716 	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
   1717 					  size * 8 - 1);
   1718 	      return 0;
   1719 	    }
   1720 	  break;
   1721 
   1722 	case AARCH64_OPND_IMM_VLSR:
   1723 	  size = aarch64_get_qualifier_esize (qualifier);
   1724 	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
   1725 	    {
   1726 	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
   1727 	      return 0;
   1728 	    }
   1729 	  break;
   1730 
   1731 	case AARCH64_OPND_SIMD_IMM:
   1732 	case AARCH64_OPND_SIMD_IMM_SFT:
   1733 	  /* Qualifier check.  */
   1734 	  switch (qualifier)
   1735 	    {
   1736 	    case AARCH64_OPND_QLF_LSL:
   1737 	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
   1738 		{
   1739 		  set_other_error (mismatch_detail, idx,
   1740 				   _("invalid shift operator"));
   1741 		  return 0;
   1742 		}
   1743 	      break;
   1744 	    case AARCH64_OPND_QLF_MSL:
   1745 	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
   1746 		{
   1747 		  set_other_error (mismatch_detail, idx,
   1748 				   _("invalid shift operator"));
   1749 		  return 0;
   1750 		}
   1751 	      break;
   1752 	    case AARCH64_OPND_QLF_NIL:
   1753 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   1754 		{
   1755 		  set_other_error (mismatch_detail, idx,
   1756 				   _("shift is not permitted"));
   1757 		  return 0;
   1758 		}
   1759 	      break;
   1760 	    default:
   1761 	      assert (0);
   1762 	      return 0;
   1763 	    }
   1764 	  /* Is the immediate valid?  */
   1765 	  assert (idx == 1);
   1766 	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
   1767 	    {
   1768 	      /* uimm8 or simm8 */
   1769 	      if (!value_in_range_p (opnd->imm.value, -128, 255))
   1770 		{
   1771 		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
   1772 		  return 0;
   1773 		}
   1774 	    }
   1775 	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
   1776 	    {
   1777 	      /* uimm64 is not
   1778 		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
   1779 		 ffffffffgggggggghhhhhhhh'.  */
   1780 	      set_other_error (mismatch_detail, idx,
   1781 			       _("invalid value for immediate"));
   1782 	      return 0;
   1783 	    }
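         	  /* In other words, a 64-bit immediate is only representable here
         	     when every byte is either 0x00 or 0xff, e.g. 0xff00ff00ff00ff00
         	     is accepted but 0x0102030405060708 is not.  */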
   1784 	  /* Is the shift amount valid?  */
   1785 	  switch (opnd->shifter.kind)
   1786 	    {
   1787 	    case AARCH64_MOD_LSL:
   1788 	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
   1789 	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
   1790 		{
   1791 		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
   1792 						     (size - 1) * 8);
   1793 		  return 0;
   1794 		}
   1795 	      if (!value_aligned_p (opnd->shifter.amount, 8))
   1796 		{
   1797 		  set_unaligned_error (mismatch_detail, idx, 8);
   1798 		  return 0;
   1799 		}
   1800 	      break;
   1801 	    case AARCH64_MOD_MSL:
    1802 	      /* Only 8 and 16 are valid shift amounts.  */
   1803 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
   1804 		{
   1805 		  set_other_error (mismatch_detail, idx,
    1806 				   _("shift amount expected to be 8 or 16"));
   1807 		  return 0;
   1808 		}
   1809 	      break;
   1810 	    default:
   1811 	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
   1812 		{
   1813 		  set_other_error (mismatch_detail, idx,
   1814 				   _("invalid shift operator"));
   1815 		  return 0;
   1816 		}
   1817 	      break;
   1818 	    }
   1819 	  break;
   1820 
   1821 	case AARCH64_OPND_FPIMM:
   1822 	case AARCH64_OPND_SIMD_FPIMM:
   1823 	  if (opnd->imm.is_fp == 0)
   1824 	    {
   1825 	      set_other_error (mismatch_detail, idx,
   1826 			       _("floating-point immediate expected"));
   1827 	      return 0;
   1828 	    }
   1829 	  /* The value is expected to be an 8-bit floating-point constant with
   1830 	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
   1831 	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
   1832 	     instruction).  */
   1833 	  if (!value_in_range_p (opnd->imm.value, 0, 255))
   1834 	    {
   1835 	      set_other_error (mismatch_detail, idx,
   1836 			       _("immediate out of range"));
   1837 	      return 0;
   1838 	    }
   1839 	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
   1840 	    {
   1841 	      set_other_error (mismatch_detail, idx,
   1842 			       _("invalid shift operator"));
   1843 	      return 0;
   1844 	    }
   1845 	  break;
   1846 
   1847 	default:
   1848 	  break;
   1849 	}
   1850       break;
   1851 
   1852     case AARCH64_OPND_CLASS_CP_REG:
   1853       /* Cn or Cm: 4-bit opcode field named for historical reasons.
    1854 	 Valid range: C0 - C15.  */
   1855       if (opnd->reg.regno > 15)
   1856 	{
   1857 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   1858 	  return 0;
   1859 	}
   1860       break;
   1861 
   1862     case AARCH64_OPND_CLASS_SYSTEM:
   1863       switch (type)
   1864 	{
   1865 	case AARCH64_OPND_PSTATEFIELD:
   1866 	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
   1867 	  /* MSR SPSel, #uimm4
   1868 	     Uses uimm4 as a control value to select the stack pointer: if
   1869 	     bit 0 is set it selects the current exception level's stack
    1870 	     pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
   1871 	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
   1872 	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
   1873 	    {
   1874 	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
   1875 	      return 0;
   1876 	    }
   1877 	  break;
   1878 	default:
   1879 	  break;
   1880 	}
   1881       break;
   1882 
   1883     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
   1884       /* Get the upper bound for the element index.  */
   1885       num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
   1886       /* Index out-of-range.  */
   1887       if (!value_in_range_p (opnd->reglane.index, 0, num))
   1888 	{
   1889 	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
   1890 	  return 0;
   1891 	}
   1892       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
   1893 	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
   1894 	 number is encoded in "size:M:Rm":
   1895 	 size	<Vm>
   1896 	 00		RESERVED
   1897 	 01		0:Rm
   1898 	 10		M:Rm
   1899 	 11		RESERVED  */
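               /* Hence for the .H element variant only V0-V15 may be used, e.g.
         	 smlal v0.4s, v1.4h, v15.h[3] is accepted but v16.h[3] is rejected
         	 below.  */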
   1900       if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
   1901 	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
   1902 	{
   1903 	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
   1904 	  return 0;
   1905 	}
   1906       break;
   1907 
   1908     case AARCH64_OPND_CLASS_MODIFIED_REG:
   1909       assert (idx == 1 || idx == 2);
   1910       switch (type)
   1911 	{
   1912 	case AARCH64_OPND_Rm_EXT:
   1913 	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
   1914 	      && opnd->shifter.kind != AARCH64_MOD_LSL)
   1915 	    {
   1916 	      set_other_error (mismatch_detail, idx,
   1917 			       _("extend operator expected"));
   1918 	      return 0;
   1919 	    }
   1920 	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
   1921 	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
   1922 	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
   1923 	     case.  */
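         	  /* E.g. in ADD SP, SP, X1 the operator may be omitted and defaults
         	     to LSL #0, whereas ADD X0, X1, W2, UXTW must spell out the
         	     extend.  */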
   1924 	  if (!aarch64_stack_pointer_p (opnds + 0)
   1925 	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
   1926 	    {
   1927 	      if (!opnd->shifter.operator_present)
   1928 		{
   1929 		  set_other_error (mismatch_detail, idx,
   1930 				   _("missing extend operator"));
   1931 		  return 0;
   1932 		}
   1933 	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
   1934 		{
   1935 		  set_other_error (mismatch_detail, idx,
   1936 				   _("'LSL' operator not allowed"));
   1937 		  return 0;
   1938 		}
   1939 	    }
   1940 	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
   1941 		  || opnd->shifter.kind == AARCH64_MOD_LSL);
   1942 	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
   1943 	    {
   1944 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
   1945 	      return 0;
   1946 	    }
   1947 	  /* In the 64-bit form, the final register operand is written as Wm
   1948 	     for all but the (possibly omitted) UXTX/LSL and SXTX
   1949 	     operators.
   1950 	     N.B. GAS allows X register to be used with any operator as a
   1951 	     programming convenience.  */
   1952 	  if (qualifier == AARCH64_OPND_QLF_X
   1953 	      && opnd->shifter.kind != AARCH64_MOD_LSL
   1954 	      && opnd->shifter.kind != AARCH64_MOD_UXTX
   1955 	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
   1956 	    {
   1957 	      set_other_error (mismatch_detail, idx, _("W register expected"));
   1958 	      return 0;
   1959 	    }
   1960 	  break;
   1961 
   1962 	case AARCH64_OPND_Rm_SFT:
   1963 	  /* ROR is not available to the shifted register operand in
   1964 	     arithmetic instructions.  */
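         	  /* E.g. ORR X0, X1, X2, ROR #4 is fine (logical class), but
         	     ADD X0, X1, X2, ROR #4 is rejected here.  */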
   1965 	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
   1966 	    {
   1967 	      set_other_error (mismatch_detail, idx,
   1968 			       _("shift operator expected"));
   1969 	      return 0;
   1970 	    }
   1971 	  if (opnd->shifter.kind == AARCH64_MOD_ROR
   1972 	      && opcode->iclass != log_shift)
   1973 	    {
   1974 	      set_other_error (mismatch_detail, idx,
   1975 			       _("'ROR' operator not allowed"));
   1976 	      return 0;
   1977 	    }
   1978 	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
   1979 	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
   1980 	    {
   1981 	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
   1982 	      return 0;
   1983 	    }
   1984 	  break;
   1985 
   1986 	default:
   1987 	  break;
   1988 	}
   1989       break;
   1990 
   1991     default:
   1992       break;
   1993     }
   1994 
   1995   return 1;
   1996 }
   1997 
   1998 /* Main entrypoint for the operand constraint checking.
   1999 
   2000    Return 1 if operands of *INST meet the constraint applied by the operand
   2001    codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   2002    not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   2003    adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   2004    with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   2005    error kind when it is notified that an instruction does not pass the check).
   2006 
   2007    Un-determined operand qualifiers may get established during the process.  */
   2008 
   2009 int
   2010 aarch64_match_operands_constraint (aarch64_inst *inst,
   2011 				   aarch64_operand_error *mismatch_detail)
   2012 {
   2013   int i;
   2014 
   2015   DEBUG_TRACE ("enter");
   2016 
    2017   /* Match operands' qualifiers.
    2018      *INST has already had qualifiers established for some, if not all, of
    2019      its operands; we need to find out whether these established
    2020      qualifiers match one of the qualifier sequences in
    2021      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
    2022      the corresponding qualifier from such a sequence.
    2023      Only basic operand constraint checking is done here; the more thorough
    2024      constraint checking will be carried out by operand_general_constraint_met_p,
    2025      which has to be called after this in order to get all of the operands'
    2026      qualifiers established.  */
   2027   if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
   2028     {
   2029       DEBUG_TRACE ("FAIL on operand qualifier matching");
   2030       if (mismatch_detail)
   2031 	{
    2032 	  /* Return an error type to indicate that it is a qualifier
    2033 	     matching failure; we don't care about which operand, as there
    2034 	     is enough information in the opcode table to reproduce it.  */
   2035 	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
   2036 	  mismatch_detail->index = -1;
   2037 	  mismatch_detail->error = NULL;
   2038 	}
   2039       return 0;
   2040     }
   2041 
   2042   /* Match operands' constraint.  */
   2043   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2044     {
   2045       enum aarch64_opnd type = inst->opcode->operands[i];
   2046       if (type == AARCH64_OPND_NIL)
   2047 	break;
   2048       if (inst->operands[i].skip)
   2049 	{
   2050 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
   2051 	  continue;
   2052 	}
   2053       if (operand_general_constraint_met_p (inst->operands, i, type,
   2054 					    inst->opcode, mismatch_detail) == 0)
   2055 	{
   2056 	  DEBUG_TRACE ("FAIL on operand %d", i);
   2057 	  return 0;
   2058 	}
   2059     }
   2060 
   2061   DEBUG_TRACE ("PASS");
   2062 
   2063   return 1;
   2064 }
   2065 
   2066 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   2067    Also updates the TYPE of each INST->OPERANDS with the corresponding
   2068    value of OPCODE->OPERANDS.
   2069 
    2070    Note that some operand qualifiers may need to be manually cleared by
    2071    the caller before it further calls aarch64_opcode_encode; doing
    2072    this helps the qualifier matching facilities work
    2073    properly.  */
   2074 
   2075 const aarch64_opcode*
   2076 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
   2077 {
   2078   int i;
   2079   const aarch64_opcode *old = inst->opcode;
   2080 
   2081   inst->opcode = opcode;
   2082 
   2083   /* Update the operand types.  */
   2084   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2085     {
   2086       inst->operands[i].type = opcode->operands[i];
   2087       if (opcode->operands[i] == AARCH64_OPND_NIL)
   2088 	break;
   2089     }
   2090 
   2091   DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
   2092 
   2093   return old;
   2094 }
   2095 
   2096 int
   2097 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
   2098 {
   2099   int i;
   2100   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   2101     if (operands[i] == operand)
   2102       return i;
   2103     else if (operands[i] == AARCH64_OPND_NIL)
   2104       break;
   2105   return -1;
   2106 }
   2107 
   2108 /* [0][0]  32-bit integer regs with sp   Wn
   2110    [0][1]  64-bit integer regs with sp   Xn  sf=1
   2111    [1][0]  32-bit integer regs with #0   Wn
   2112    [1][1]  64-bit integer regs with #0   Xn  sf=1 */
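         /* So, for example, int_reg[1][0][31] is "wzr" and int_reg[0][1][31]
            is "sp".  */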
   2113 static const char *int_reg[2][2][32] = {
   2114 #define R32 "w"
   2115 #define R64 "x"
   2116   { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
   2117       R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
   2118       R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
   2119       R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30",    "wsp" },
   2120     { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
   2121       R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
   2122       R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
   2123       R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30",     "sp" } },
   2124   { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
   2125       R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
   2126       R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
   2127       R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
   2128     { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
   2129       R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
   2130       R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
   2131       R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
   2132 #undef R64
   2133 #undef R32
   2134 };
   2135 
   2136 /* Return the integer register name.
    2137    If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
   2138 
   2139 static inline const char *
   2140 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
   2141 {
   2142   const int has_zr = sp_reg_p ? 0 : 1;
   2143   const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
   2144   return int_reg[has_zr][is_64][regno];
   2145 }
   2146 
   2147 /* Like get_int_reg_name, but IS_64 is always 1.  */
   2148 
   2149 static inline const char *
   2150 get_64bit_int_reg_name (int regno, int sp_reg_p)
   2151 {
   2152   const int has_zr = sp_reg_p ? 0 : 1;
   2153   return int_reg[has_zr][1][regno];
   2154 }
   2155 
   2156 /* Types for expanding an encoded 8-bit value to a floating-point value.  */
   2157 
   2158 typedef union
   2159 {
   2160   uint64_t i;
   2161   double   d;
   2162 } double_conv_t;
   2163 
   2164 typedef union
   2165 {
   2166   uint32_t i;
   2167   float    f;
   2168 } single_conv_t;
   2169 
   2170 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   2171    normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   2172    (depending on the type of the instruction).  IMM8 will be expanded to a
   2173    single-precision floating-point value (IS_DP == 0) or a double-precision
   2174    floating-point value (IS_DP == 1).  The expanded value is returned.  */
   2175 
   2176 static uint64_t
   2177 expand_fp_imm (int is_dp, uint32_t imm8)
   2178 {
   2179   uint64_t imm;
   2180   uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
   2181 
   2182   imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
   2183   imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
   2184   imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
   2185   imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
   2186     | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
   2187   if (is_dp)
   2188     {
   2189       imm = (imm8_7 << (63-32))		/* imm8<7>  */
    2190 	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
   2191 	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
   2192 	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
   2193 	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
   2194       imm <<= 32;
   2195     }
   2196   else
   2197     {
   2198       imm = (imm8_7 << 31)	/* imm8<7>              */
   2199 	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
   2200 	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
   2201 	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
   2202     }
   2203 
   2204   return imm;
   2205 }
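         /* A hand-checked example of the expansion above: imm8 == 0x70 (a = 0,
            b:c:d = 1:1:1, e:f:g:h = 0) yields 0x3f800000 when IS_DP == 0,
            i.e. single-precision 1.0.  */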
   2206 
   2207 /* Produce the string representation of the register list operand *OPND
    2208    in the buffer pointed to by BUF of size SIZE.  */
   2209 static void
   2210 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
   2211 {
   2212   const int num_regs = opnd->reglist.num_regs;
   2213   const int first_reg = opnd->reglist.first_regno;
   2214   const int last_reg = (first_reg + num_regs - 1) & 0x1f;
   2215   const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
   2216   char tb[8];	/* Temporary buffer.  */
   2217 
   2218   assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
   2219   assert (num_regs >= 1 && num_regs <= 4);
   2220 
   2221   /* Prepare the index if any.  */
   2222   if (opnd->reglist.has_index)
   2223     snprintf (tb, 8, "[%d]", opnd->reglist.index);
   2224   else
   2225     tb[0] = '\0';
   2226 
   2227   /* The hyphenated form is preferred for disassembly if there are
   2228      more than two registers in the list, and the register numbers
   2229      are monotonically increasing in increments of one.  */
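           /* E.g. {v4.4s-v7.4s} for a four-register list starting at v4; a
              wrapped list such as {v31.8b, v0.8b, v1.8b} is printed in full
              because last_reg < first_reg.  */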
   2230   if (num_regs > 2 && last_reg > first_reg)
   2231     snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
   2232 	      last_reg, qlf_name, tb);
   2233   else
   2234     {
   2235       const int reg0 = first_reg;
   2236       const int reg1 = (first_reg + 1) & 0x1f;
   2237       const int reg2 = (first_reg + 2) & 0x1f;
   2238       const int reg3 = (first_reg + 3) & 0x1f;
   2239 
   2240       switch (num_regs)
   2241 	{
   2242 	case 1:
   2243 	  snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
   2244 	  break;
   2245 	case 2:
   2246 	  snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
   2247 		    reg1, qlf_name, tb);
   2248 	  break;
   2249 	case 3:
   2250 	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
   2251 		    reg1, qlf_name, reg2, qlf_name, tb);
   2252 	  break;
   2253 	case 4:
   2254 	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
   2255 		    reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
   2256 		    reg3, qlf_name, tb);
   2257 	  break;
   2258 	}
   2259     }
   2260 }
   2261 
   2262 /* Produce the string representation of the register offset address operand
    2263    *OPND in the buffer pointed to by BUF of size SIZE.  */
   2264 static void
   2265 print_register_offset_address (char *buf, size_t size,
   2266 			       const aarch64_opnd_info *opnd)
   2267 {
   2268   const size_t tblen = 16;
   2269   char tb[tblen];		/* Temporary buffer.  */
    2270   bfd_boolean lsl_p = FALSE;	/* Is the shift operator LSL?  */
   2271   bfd_boolean wm_p = FALSE;	/* Should Rm be Wm?  */
   2272   bfd_boolean print_extend_p = TRUE;
   2273   bfd_boolean print_amount_p = TRUE;
   2274   const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
   2275 
   2276   switch (opnd->shifter.kind)
   2277     {
   2278     case AARCH64_MOD_UXTW: wm_p = TRUE; break;
   2279     case AARCH64_MOD_LSL : lsl_p = TRUE; break;
   2280     case AARCH64_MOD_SXTW: wm_p = TRUE; break;
   2281     case AARCH64_MOD_SXTX: break;
   2282     default: assert (0);
   2283     }
   2284 
   2285   if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
   2286 				|| !opnd->shifter.amount_present))
   2287     {
    2288       /* Don't print the shift/extend amount when the amount is zero and
    2289          it is not the special case of an 8-bit load/store instruction.  */
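               /* E.g. a zero amount in LDR x0, [x1, w2, sxtw] is simply omitted,
                  while the 8-bit case LDRB w0, [x1, x2, lsl #0] keeps the
                  explicit "#0" when an amount was supplied.  */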
   2290       print_amount_p = FALSE;
   2291       /* Likewise, no need to print the shift operator LSL in such a
   2292 	 situation.  */
   2293       if (lsl_p)
   2294 	print_extend_p = FALSE;
   2295     }
   2296 
   2297   /* Prepare for the extend/shift.  */
   2298   if (print_extend_p)
   2299     {
   2300       if (print_amount_p)
   2301 	snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
   2302       else
   2303 	snprintf (tb, tblen, ",%s", shift_name);
   2304     }
   2305   else
   2306     tb[0] = '\0';
   2307 
   2308   snprintf (buf, size, "[%s,%s%s]",
   2309 	    get_64bit_int_reg_name (opnd->addr.base_regno, 1),
   2310 	    get_int_reg_name (opnd->addr.offset.regno,
   2311 			      wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
   2312 			      0 /* sp_reg_p */),
   2313 	    tb);
   2314 }
   2315 
   2316 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   2317    in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   2318    PC, PCREL_P and ADDRESS are used to pass in and return information about
   2319    the PC-relative address calculation, where the PC value is passed in
    2320    PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
   2321    will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   2322    calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
   2323 
   2324    The function serves both the disassembler and the assembler diagnostics
   2325    issuer, which is the reason why it lives in this file.  */
   2326 
   2327 void
   2328 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
   2329 		       const aarch64_opcode *opcode,
   2330 		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
   2331 		       bfd_vma *address)
   2332 {
   2333   int i;
   2334   const char *name = NULL;
   2335   const aarch64_opnd_info *opnd = opnds + idx;
   2336   enum aarch64_modifier_kind kind;
   2337   uint64_t addr;
   2338 
   2339   buf[0] = '\0';
   2340   if (pcrel_p)
   2341     *pcrel_p = 0;
   2342 
   2343   switch (opnd->type)
   2344     {
   2345     case AARCH64_OPND_Rd:
   2346     case AARCH64_OPND_Rn:
   2347     case AARCH64_OPND_Rm:
   2348     case AARCH64_OPND_Rt:
   2349     case AARCH64_OPND_Rt2:
   2350     case AARCH64_OPND_Rs:
   2351     case AARCH64_OPND_Ra:
   2352     case AARCH64_OPND_Rt_SYS:
   2353     case AARCH64_OPND_PAIRREG:
   2354       /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
    2355 	 the <ic_op>, therefore we use opnd->present to override the
   2356 	 generic optional-ness information.  */
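               /* E.g. IC IALLU takes no <Xt> at all, while IC IVAU, X0 does.  */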
   2357       if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
   2358 	break;
   2359       /* Omit the operand, e.g. RET.  */
   2360       if (optional_operand_p (opcode, idx)
   2361 	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
   2362 	break;
   2363       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   2364 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   2365       snprintf (buf, size, "%s",
   2366 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   2367       break;
   2368 
   2369     case AARCH64_OPND_Rd_SP:
   2370     case AARCH64_OPND_Rn_SP:
   2371       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   2372 	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
   2373 	      || opnd->qualifier == AARCH64_OPND_QLF_X
   2374 	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
   2375       snprintf (buf, size, "%s",
   2376 		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
   2377       break;
   2378 
   2379     case AARCH64_OPND_Rm_EXT:
   2380       kind = opnd->shifter.kind;
   2381       assert (idx == 1 || idx == 2);
   2382       if ((aarch64_stack_pointer_p (opnds)
   2383 	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
   2384 	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
   2385 	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
   2386 	       && kind == AARCH64_MOD_UXTW)
   2387 	      || (opnd->qualifier == AARCH64_OPND_QLF_X
   2388 		  && kind == AARCH64_MOD_UXTX)))
   2389 	{
   2390 	  /* 'LSL' is the preferred form in this case.  */
   2391 	  kind = AARCH64_MOD_LSL;
   2392 	  if (opnd->shifter.amount == 0)
   2393 	    {
   2394 	      /* Shifter omitted.  */
   2395 	      snprintf (buf, size, "%s",
   2396 			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   2397 	      break;
   2398 	    }
   2399 	}
   2400       if (opnd->shifter.amount)
   2401 	snprintf (buf, size, "%s, %s #%d",
   2402 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   2403 		  aarch64_operand_modifiers[kind].name,
   2404 		  opnd->shifter.amount);
   2405       else
   2406 	snprintf (buf, size, "%s, %s",
   2407 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   2408 		  aarch64_operand_modifiers[kind].name);
   2409       break;
   2410 
   2411     case AARCH64_OPND_Rm_SFT:
   2412       assert (opnd->qualifier == AARCH64_OPND_QLF_W
   2413 	      || opnd->qualifier == AARCH64_OPND_QLF_X);
   2414       if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
   2415 	snprintf (buf, size, "%s",
   2416 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
   2417       else
   2418 	snprintf (buf, size, "%s, %s #%d",
   2419 		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
   2420 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
   2421 		  opnd->shifter.amount);
   2422       break;
   2423 
   2424     case AARCH64_OPND_Fd:
   2425     case AARCH64_OPND_Fn:
   2426     case AARCH64_OPND_Fm:
   2427     case AARCH64_OPND_Fa:
   2428     case AARCH64_OPND_Ft:
   2429     case AARCH64_OPND_Ft2:
   2430     case AARCH64_OPND_Sd:
   2431     case AARCH64_OPND_Sn:
   2432     case AARCH64_OPND_Sm:
   2433       snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
   2434 		opnd->reg.regno);
   2435       break;
   2436 
   2437     case AARCH64_OPND_Vd:
   2438     case AARCH64_OPND_Vn:
   2439     case AARCH64_OPND_Vm:
   2440       snprintf (buf, size, "v%d.%s", opnd->reg.regno,
   2441 		aarch64_get_qualifier_name (opnd->qualifier));
   2442       break;
   2443 
   2444     case AARCH64_OPND_Ed:
   2445     case AARCH64_OPND_En:
   2446     case AARCH64_OPND_Em:
   2447       snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
   2448 		aarch64_get_qualifier_name (opnd->qualifier),
   2449 		opnd->reglane.index);
   2450       break;
   2451 
   2452     case AARCH64_OPND_VdD1:
   2453     case AARCH64_OPND_VnD1:
   2454       snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
   2455       break;
   2456 
   2457     case AARCH64_OPND_LVn:
   2458     case AARCH64_OPND_LVt:
   2459     case AARCH64_OPND_LVt_AL:
   2460     case AARCH64_OPND_LEt:
   2461       print_register_list (buf, size, opnd);
   2462       break;
   2463 
   2464     case AARCH64_OPND_Cn:
   2465     case AARCH64_OPND_Cm:
   2466       snprintf (buf, size, "C%d", opnd->reg.regno);
   2467       break;
   2468 
   2469     case AARCH64_OPND_IDX:
   2470     case AARCH64_OPND_IMM:
   2471     case AARCH64_OPND_WIDTH:
   2472     case AARCH64_OPND_UIMM3_OP1:
   2473     case AARCH64_OPND_UIMM3_OP2:
   2474     case AARCH64_OPND_BIT_NUM:
   2475     case AARCH64_OPND_IMM_VLSL:
   2476     case AARCH64_OPND_IMM_VLSR:
   2477     case AARCH64_OPND_SHLL_IMM:
   2478     case AARCH64_OPND_IMM0:
   2479     case AARCH64_OPND_IMMR:
   2480     case AARCH64_OPND_IMMS:
   2481     case AARCH64_OPND_FBITS:
   2482       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
   2483       break;
   2484 
   2485     case AARCH64_OPND_IMM_MOV:
   2486       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   2487 	{
   2488 	case 4:	/* e.g. MOV Wd, #<imm32>.  */
   2489 	    {
   2490 	      int imm32 = opnd->imm.value;
   2491 	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
   2492 	    }
   2493 	  break;
   2494 	case 8:	/* e.g. MOV Xd, #<imm64>.  */
   2495 	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
   2496 		    opnd->imm.value, opnd->imm.value);
   2497 	  break;
   2498 	default: assert (0);
   2499 	}
   2500       break;
   2501 
   2502     case AARCH64_OPND_FPIMM0:
   2503       snprintf (buf, size, "#0.0");
   2504       break;
   2505 
   2506     case AARCH64_OPND_LIMM:
   2507     case AARCH64_OPND_AIMM:
   2508     case AARCH64_OPND_HALF:
   2509       if (opnd->shifter.amount)
   2510 	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
   2511 		  opnd->shifter.amount);
   2512       else
   2513 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
   2514       break;
   2515 
   2516     case AARCH64_OPND_SIMD_IMM:
   2517     case AARCH64_OPND_SIMD_IMM_SFT:
   2518       if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
   2519 	  || opnd->shifter.kind == AARCH64_MOD_NONE)
   2520 	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
   2521       else
   2522 	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
   2523 		  aarch64_operand_modifiers[opnd->shifter.kind].name,
   2524 		  opnd->shifter.amount);
   2525       break;
   2526 
   2527     case AARCH64_OPND_FPIMM:
   2528     case AARCH64_OPND_SIMD_FPIMM:
   2529       switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
   2530 	{
   2531 	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
   2532 	    {
   2533 	      single_conv_t c;
   2534 	      c.i = expand_fp_imm (0, opnd->imm.value);
   2535 	      snprintf (buf, size,  "#%.18e", c.f);
   2536 	    }
   2537 	  break;
    2538 	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
   2539 	    {
   2540 	      double_conv_t c;
   2541 	      c.i = expand_fp_imm (1, opnd->imm.value);
   2542 	      snprintf (buf, size,  "#%.18e", c.d);
   2543 	    }
   2544 	  break;
   2545 	default: assert (0);
   2546 	}
   2547       break;
   2548 
   2549     case AARCH64_OPND_CCMP_IMM:
   2550     case AARCH64_OPND_NZCV:
   2551     case AARCH64_OPND_EXCEPTION:
   2552     case AARCH64_OPND_UIMM4:
   2553     case AARCH64_OPND_UIMM7:
   2554       if (optional_operand_p (opcode, idx) == TRUE
   2555 	  && (opnd->imm.value ==
   2556 	      (int64_t) get_optional_operand_default_value (opcode)))
   2557 	/* Omit the operand, e.g. DCPS1.  */
   2558 	break;
   2559       snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
   2560       break;
   2561 
   2562     case AARCH64_OPND_COND:
   2563     case AARCH64_OPND_COND1:
   2564       snprintf (buf, size, "%s", opnd->cond->names[0]);
   2565       break;
   2566 
   2567     case AARCH64_OPND_ADDR_ADRP:
   2568       addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
   2569 	+ opnd->imm.value;
   2570       if (pcrel_p)
   2571 	*pcrel_p = 1;
   2572       if (address)
   2573 	*address = addr;
   2574       /* This is not necessary during the disassembling, as print_address_func
   2575 	 in the disassemble_info will take care of the printing.  But some
    2576 	 other callers may still be interested in getting the string in *BUF,
   2577 	 so here we do snprintf regardless.  */
   2578       snprintf (buf, size, "#0x%" PRIx64, addr);
   2579       break;
   2580 
   2581     case AARCH64_OPND_ADDR_PCREL14:
   2582     case AARCH64_OPND_ADDR_PCREL19:
   2583     case AARCH64_OPND_ADDR_PCREL21:
   2584     case AARCH64_OPND_ADDR_PCREL26:
   2585       addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
   2586       if (pcrel_p)
   2587 	*pcrel_p = 1;
   2588       if (address)
   2589 	*address = addr;
   2590       /* This is not necessary during the disassembling, as print_address_func
   2591 	 in the disassemble_info will take care of the printing.  But some
    2592 	 other callers may still be interested in getting the string in *BUF,
   2593 	 so here we do snprintf regardless.  */
   2594       snprintf (buf, size, "#0x%" PRIx64, addr);
   2595       break;
   2596 
   2597     case AARCH64_OPND_ADDR_SIMPLE:
   2598     case AARCH64_OPND_SIMD_ADDR_SIMPLE:
   2599     case AARCH64_OPND_SIMD_ADDR_POST:
   2600       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   2601       if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
   2602 	{
   2603 	  if (opnd->addr.offset.is_reg)
   2604 	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
   2605 	  else
   2606 	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
   2607 	}
   2608       else
   2609 	snprintf (buf, size, "[%s]", name);
   2610       break;
   2611 
   2612     case AARCH64_OPND_ADDR_REGOFF:
   2613       print_register_offset_address (buf, size, opnd);
   2614       break;
   2615 
   2616     case AARCH64_OPND_ADDR_SIMM7:
   2617     case AARCH64_OPND_ADDR_SIMM9:
   2618     case AARCH64_OPND_ADDR_SIMM9_2:
   2619       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   2620       if (opnd->addr.writeback)
   2621 	{
   2622 	  if (opnd->addr.preind)
   2623 	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
   2624 	  else
   2625 	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
   2626 	}
   2627       else
   2628 	{
   2629 	  if (opnd->addr.offset.imm)
   2630 	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
   2631 	  else
   2632 	    snprintf (buf, size, "[%s]", name);
   2633 	}
   2634       break;
   2635 
   2636     case AARCH64_OPND_ADDR_UIMM12:
   2637       name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
   2638       if (opnd->addr.offset.imm)
   2639 	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
   2640       else
   2641 	snprintf (buf, size, "[%s]", name);
   2642       break;
   2643 
   2644     case AARCH64_OPND_SYSREG:
   2645       for (i = 0; aarch64_sys_regs[i].name; ++i)
   2646 	if (aarch64_sys_regs[i].value == opnd->sysreg
   2647 	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
   2648 	  break;
   2649       if (aarch64_sys_regs[i].name)
   2650 	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
   2651       else
   2652 	{
    2653 	  /* Implementation-defined system register.  */
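         	  /* E.g. an unrecognised encoding with op0=3, op1=0, CRn=11, CRm=0,
         	     op2=2 is printed as "s3_0_c11_c0_2".  */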
   2654 	  unsigned int value = opnd->sysreg;
   2655 	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
   2656 		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
   2657 		    value & 0x7);
   2658 	}
   2659       break;
   2660 
   2661     case AARCH64_OPND_PSTATEFIELD:
   2662       for (i = 0; aarch64_pstatefields[i].name; ++i)
   2663 	if (aarch64_pstatefields[i].value == opnd->pstatefield)
   2664 	  break;
   2665       assert (aarch64_pstatefields[i].name);
   2666       snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
   2667       break;
   2668 
   2669     case AARCH64_OPND_SYSREG_AT:
   2670     case AARCH64_OPND_SYSREG_DC:
   2671     case AARCH64_OPND_SYSREG_IC:
   2672     case AARCH64_OPND_SYSREG_TLBI:
   2673       snprintf (buf, size, "%s", opnd->sysins_op->template);
   2674       break;
   2675 
   2676     case AARCH64_OPND_BARRIER:
   2677       snprintf (buf, size, "%s", opnd->barrier->name);
   2678       break;
   2679 
   2680     case AARCH64_OPND_BARRIER_ISB:
   2681       /* Operand can be omitted, e.g. in DCPS1.  */
   2682       if (! optional_operand_p (opcode, idx)
   2683 	  || (opnd->barrier->value
   2684 	      != get_optional_operand_default_value (opcode)))
   2685 	snprintf (buf, size, "#0x%x", opnd->barrier->value);
   2686       break;
   2687 
   2688     case AARCH64_OPND_PRFOP:
   2689       if (opnd->prfop->name != NULL)
   2690 	snprintf (buf, size, "%s", opnd->prfop->name);
   2691       else
   2692 	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
   2693       break;
   2694 
   2695     default:
   2696       assert (0);
   2697     }
   2698 }
   2699 
   2700 #define CPENC(op0,op1,crn,crm,op2) \
   2702   ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
   2703   /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
   2704 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
   2705   /* for 3.9.10 System Instructions */
   2706 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
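         /* The packed layout is therefore op0 in bits [15:14], op1 in [13:11],
            CRn in [10:7], CRm in [6:3] and op2 in [2:0]; e.g. CPENC(3,3,C4,C4,1),
            the value used for "fpsr" below, works out to 0xda21.  */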
   2707 
   2708 #define C0  0
   2709 #define C1  1
   2710 #define C2  2
   2711 #define C3  3
   2712 #define C4  4
   2713 #define C5  5
   2714 #define C6  6
   2715 #define C7  7
   2716 #define C8  8
   2717 #define C9  9
   2718 #define C10 10
   2719 #define C11 11
   2720 #define C12 12
   2721 #define C13 13
   2722 #define C14 14
   2723 #define C15 15
   2724 
   2725 #ifdef F_DEPRECATED
   2726 #undef F_DEPRECATED
   2727 #endif
   2728 #define F_DEPRECATED	0x1	/* Deprecated system register.  */
   2729 
    2730 /* TODO: there are two more issues that need to be resolved:
   2731    1. handle read-only and write-only system registers
   2732    2. handle cpu-implementation-defined system registers.  */
   2733 const aarch64_sys_reg aarch64_sys_regs [] =
   2734 {
   2735   { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
   2736   { "elr_el1",          CPEN_(0,C0,1),	0 },
   2737   { "sp_el0",           CPEN_(0,C1,0),	0 },
   2738   { "spsel",            CPEN_(0,C2,0),	0 },
   2739   { "daif",             CPEN_(3,C2,1),	0 },
   2740   { "currentel",        CPEN_(0,C2,2),	0 }, /* RO */
   2741   { "nzcv",             CPEN_(3,C2,0),	0 },
   2742   { "fpcr",             CPEN_(3,C4,0),	0 },
   2743   { "fpsr",             CPEN_(3,C4,1),	0 },
   2744   { "dspsr_el0",        CPEN_(3,C5,0),	0 },
   2745   { "dlr_el0",          CPEN_(3,C5,1),	0 },
   2746   { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
   2747   { "elr_el2",          CPEN_(4,C0,1),	0 },
   2748   { "sp_el1",           CPEN_(4,C1,0),	0 },
   2749   { "spsr_irq",         CPEN_(4,C3,0),	0 },
   2750   { "spsr_abt",         CPEN_(4,C3,1),	0 },
   2751   { "spsr_und",         CPEN_(4,C3,2),	0 },
   2752   { "spsr_fiq",         CPEN_(4,C3,3),	0 },
   2753   { "spsr_el3",         CPEN_(6,C0,0),	0 },
   2754   { "elr_el3",          CPEN_(6,C0,1),	0 },
   2755   { "sp_el2",           CPEN_(6,C1,0),	0 },
   2756   { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
   2757   { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
   2758   { "midr_el1",         CPENC(3,0,C0,C0,0),	0 }, /* RO */
   2759   { "ctr_el0",          CPENC(3,3,C0,C0,1),	0 }, /* RO */
   2760   { "mpidr_el1",        CPENC(3,0,C0,C0,5),	0 }, /* RO */
   2761   { "revidr_el1",       CPENC(3,0,C0,C0,6),	0 }, /* RO */
   2762   { "aidr_el1",         CPENC(3,1,C0,C0,7),	0 }, /* RO */
   2763   { "dczid_el0",        CPENC(3,3,C0,C0,7),	0 }, /* RO */
   2764   { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	0 }, /* RO */
   2765   { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	0 }, /* RO */
   2766   { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	0 }, /* RO */
   2767   { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	0 }, /* RO */
   2768   { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	0 }, /* RO */
   2769   { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	0 }, /* RO */
   2770   { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	0 }, /* RO */
   2771   { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	0 }, /* RO */
   2772   { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	0 }, /* RO */
   2773   { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	0 }, /* RO */
   2774   { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	0 }, /* RO */
   2775   { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	0 }, /* RO */
   2776   { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	0 }, /* RO */
   2777   { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	0 }, /* RO */
   2778   { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	0 }, /* RO */
   2779   { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	0 }, /* RO */
   2780   { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	0 }, /* RO */
   2781   { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	0 }, /* RO */
   2782   { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	0 }, /* RO */
   2783   { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	0 }, /* RO */
   2784   { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	0 }, /* RO */
   2785   { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	0 }, /* RO */
   2786   { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	0 }, /* RO */
   2787   { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	0 }, /* RO */
   2788   { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	0 }, /* RO */
   2789   { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	0 }, /* RO */
   2790   { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	0 }, /* RO */
   2791   { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	0 }, /* RO */
   2792   { "clidr_el1",        CPENC(3,1,C0,C0,1),	0 }, /* RO */
   2793   { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 }, /* RO */
   2794   { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
   2795   { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
   2796   { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
   2797   { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
   2798   { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
   2799   { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
   2800   { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
   2801   { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
   2802   { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
   2803   { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
   2804   { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
   2805   { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
   2806   { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
   2807   { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
   2808   { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
   2809   { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
   2810   { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
   2811   { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
   2812   { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
   2813   { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
   2814   { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
   2815   { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
   2816   { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
   2817   { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
   2818   { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
   2819   { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
   2820   { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
   2821   { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
   2822   { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
   2823   { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
   2824   { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
   2825   { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
   2826   { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
   2827   { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
   2828   { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
   2829   { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
   2830   { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
   2831   { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
   2832   { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
   2833   { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
   2834   { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
   2835   { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
   2836   { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
   2837   { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
   2838   { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
   2839   { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
   2840   { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
   2841   { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
   2842   { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
   2843   { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
   2844   { "rvbar_el1",        CPENC(3,0,C12,C0,1),	0 }, /* RO */
   2845   { "rvbar_el2",        CPENC(3,4,C12,C0,1),	0 }, /* RO */
   2846   { "rvbar_el3",        CPENC(3,6,C12,C0,1),	0 }, /* RO */
   2847   { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
   2848   { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
   2849   { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
   2850   { "isr_el1",          CPENC(3,0,C12,C1,0),	0 }, /* RO */
   2851   { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
   2852   { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
   2853   { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RO */
   2854   { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
   2855   { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
   2856   { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
   2857   { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
   2858   { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RO */
   2859   { "cntpct_el0",       CPENC(3,3,C14,C0,1),	0 }, /* RO */
   2860   { "cntvct_el0",       CPENC(3,3,C14,C0,2),	0 }, /* RO */
   2861   { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
   2862   { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
   2863   { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
   2864   { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
   2865   { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
   2866   { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
   2867   { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
   2868   { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
   2869   { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
   2870   { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
   2871   { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
   2872   { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
   2873   { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
   2874   { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
   2875   { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
   2876   { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
   2877   { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
   2878   { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
   2879   { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
  { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
  { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	0 },  /* r */
  { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
  { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
  { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* r */
  { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* w */
  { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },  /* r */
  { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },  /* w */
  { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
  { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
  { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
  { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
  { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
  { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
  { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
  { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
  { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
  { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
  { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
  { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
  { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
  { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
  { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
  { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
  { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
  { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
  { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
  { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
  { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
  { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
  { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
  { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
  { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
  { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
  { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
  { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
  { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
  { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
  { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
  { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
  { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
  { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
  { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
  { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
  { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
  { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
  { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
  { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
  { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
  { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
  { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
  { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
  { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
  { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
  { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
  { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
  { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
  { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
  { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
  { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
  { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
  { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
  { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
  { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
  { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
  { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
  { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
  { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
  { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
  { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
  { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
  { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
  { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
  { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
  { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	0 },  /* r */
  { "oslar_el1",         CPENC(2,0,C1, C0, 4),	0 },  /* w */
  { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	0 },  /* r */
  { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
  { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
  { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
  { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	0 },  /* r */

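  /* Performance monitor (PMU) registers.  */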
  { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
  { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
  { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
  { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
  { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	0 },  /* w */
  { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
  { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	0 },  /* r */
  { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	0 },  /* r */
  { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
  { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
  { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
  { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
  { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
  { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
  { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
  { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
  { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
  { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
  { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
  { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
  { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
  { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
  { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
  { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
  { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
  { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
  { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
  { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
  { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
  { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
  { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
  { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
  { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
  { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
  { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
  { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
  { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
  { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
  { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
  { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
  { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
  { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
  { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
  { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
  { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
  { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
  { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
  { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
  { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
  { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
  { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
  { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
  { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
  { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
  { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
  { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
  { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
  { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
  { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
  { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
  { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
  { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
  { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
  { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
  { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
  { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
  { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
  { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
  { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
  { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
  { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
  { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
  { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
  { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
  { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
  { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
  { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
  { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
  { 0,          CPENC(0,0,0,0,0),	0 },
};
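
/* The system register table above is terminated by a null-name sentinel
   entry.  A minimal, illustrative lookup over it might look as follows;
   find_sys_reg is hypothetical and not part of this library, and it assumes
   the table above is the exported aarch64_sys_regs array whose entries hold
   the name, the encoding value and the flags, in that order:

     static const aarch64_sys_reg *
     find_sys_reg (const char *name)
     {
       const aarch64_sys_reg *r;
       for (r = aarch64_sys_regs; r->name != NULL; r++)
         if (strcmp (name, r->name) == 0)
           return r;
       return NULL;
     }  */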

bfd_boolean
aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
{
  return (reg->flags & F_DEPRECATED) != 0;
}
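
/* Illustrative use only: a consumer such as an assembler front end might
   act on the predicate above roughly as follows; report_warning is a
   placeholder for whatever diagnostic routine the consumer provides:

     if (aarch64_sys_reg_deprecated_p (reg))
       report_warning ("system register '%s' is deprecated", reg->name);  */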

const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,	0 },
  { "daifset",          0x1e,	0 },
  { "daifclr",          0x1f,	0 },
  { 0,          CPENC(0,0,0,0,0), 0 },
};
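
/* N.B.: the value of each PSTATE field above appears to pack the MSR
   (immediate) op1:op2 encoding as (op1 << 3) | op2, giving spsel
   op1=0/op2=5 (0x05), daifset op1=3/op2=6 (0x1e) and daifclr
   op1=3/op2=7 (0x1f).  */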

const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS(3,C7,C5,1), 1 },
    { 0, CPENS(0,0,0,0), 0 }
};
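
/* In the IC table above, and in the DC, AT and TLBI tables that follow, the
   final field of each entry appears to indicate whether the operation takes
   a general-purpose register operand: "ivau" takes an address in Xt, while
   "iallu" and "ialluis" take no operand.  */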

const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",        CPENS(3,C7,C4,1),  1 },
    { "ivac",       CPENS(0,C7,C6,1),  1 },
    { "isw",        CPENS(0,C7,C6,2),  1 },
    { "cvac",       CPENS(3,C7,C10,1), 1 },
    { "csw",        CPENS(0,C7,C10,2), 1 },
    { "cvau",       CPENS(3,C7,C11,1), 1 },
    { "civac",      CPENS(3,C7,C14,1), 1 },
    { "cisw",       CPENS(0,C7,C14,2), 1 },
    { 0,       CPENS(0,0,0,0), 0 }
};
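
/* Every DC operation above takes an Xt operand: an address for the by-VA
   forms such as "zva", "ivac" and "cvac", or a set/way value for "isw",
   "csw" and "cisw".  */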

const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS(0,C7,C8,0), 1 },
    { "s1e1w",      CPENS(0,C7,C8,1), 1 },
    { "s1e0r",      CPENS(0,C7,C8,2), 1 },
    { "s1e0w",      CPENS(0,C7,C8,3), 1 },
    { "s12e1r",     CPENS(4,C7,C8,4), 1 },
    { "s12e1w",     CPENS(4,C7,C8,5), 1 },
    { "s12e0r",     CPENS(4,C7,C8,6), 1 },
    { "s12e0w",     CPENS(4,C7,C8,7), 1 },
    { "s1e2r",      CPENS(4,C7,C8,0), 1 },
    { "s1e2w",      CPENS(4,C7,C8,1), 1 },
    { "s1e3r",      CPENS(6,C7,C8,0), 1 },
    { "s1e3w",      CPENS(6,C7,C8,1), 1 },
    { 0,       CPENS(0,0,0,0), 0 }
};
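
/* The AT operation names encode the translation stage, the exception level
   and the access type: e.g. "s1e1r" is a stage 1, EL1, read-permission
   translation and "s12e0w" is a combined stage 1+2, EL0, write-permission
   translation.  Each takes the virtual address to translate in Xt.  */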

const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS(0,C8,C7,1), 1 },
    { "aside1",    CPENS(0,C8,C7,2), 1 },
    { "vaae1",     CPENS(0,C8,C7,3), 1 },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS(0,C8,C3,1), 1 },
    { "aside1is",  CPENS(0,C8,C3,2), 1 },
    { "vaae1is",   CPENS(0,C8,C3,3), 1 },
    { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
    { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
    { "ipas2e1",   CPENS(4,C8,C4,1), 1 },
    { "ipas2le1",  CPENS(4,C8,C4,5), 1 },
    { "vae2",      CPENS(4,C8,C7,1), 1 },
    { "vae2is",    CPENS(4,C8,C3,1), 1 },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS(6,C8,C7,1), 1 },
    { "vae3is",    CPENS(6,C8,C3,1), 1 },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS(0,C8,C3,5), 1 },
    { "vale2is",   CPENS(4,C8,C3,5), 1 },
    { "vale3is",   CPENS(6,C8,C3,5), 1 },
    { "vaale1is",  CPENS(0,C8,C3,7), 1 },
    { "vale1",     CPENS(0,C8,C7,5), 1 },
    { "vale2",     CPENS(4,C8,C7,5), 1 },
    { "vale3",     CPENS(6,C8,C7,5), 1 },
    { "vaale1",    CPENS(0,C8,C7,7), 1 },
    { 0,       CPENS(0,0,0,0), 0 }
};
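
/* TLBI operations whose final field is 1 (e.g. "vae1", "aside1", "ipas2e1")
   take a VA, ASID or IPA argument in Xt; the "all..." and "vmall..." forms
   invalidate without a register operand.  */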

#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Include the opcode description table as well as the operand description
   table.  */
#include "aarch64-tbl.h"
   3153