      1 /* aarch64-asm.c -- AArch64 assembler support.
      2    Copyright (C) 2012-2016 Free Software Foundation, Inc.
      3    Contributed by ARM Ltd.
      4 
      5    This file is part of the GNU opcodes library.
      6 
      7    This library is free software; you can redistribute it and/or modify
      8    it under the terms of the GNU General Public License as published by
      9    the Free Software Foundation; either version 3, or (at your option)
     10    any later version.
     11 
     12    It is distributed in the hope that it will be useful, but WITHOUT
     13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
     15    License for more details.
     16 
     17    You should have received a copy of the GNU General Public License
     18    along with this program; see the file COPYING3. If not,
     19    see <http://www.gnu.org/licenses/>.  */
     20 
     21 #include "sysdep.h"
     22 #include <stdarg.h>
     23 #include "aarch64-asm.h"
     24 
     25 /* Utilities.  */
     26 
     27 /* The unnamed arguments consist of the number of fields and information about
     28    these fields where the VALUE will be inserted into CODE.  MASK can be zero or
     29    the base mask of the opcode.
     30 
      31    N.B. the fields are required to be in such an order that the least significant
      32    field for VALUE comes first, e.g. the <index> in
      33     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
      34    is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
      35    the order of M, L, H.  */
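         /* For example, to insert a 3-bit index spread across H:L:M (M least
            significant), the call would look like
              insert_fields (&code, index, 0, 3, FLD_M, FLD_L, FLD_H);
            so that index<0> goes into M, index<1> into L and index<2> into H.  */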
     36 
     37 static inline void
     38 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
     39 {
     40   uint32_t num;
     41   const aarch64_field *field;
     42   enum aarch64_field_kind kind;
     43   va_list va;
     44 
     45   va_start (va, mask);
     46   num = va_arg (va, uint32_t);
     47   assert (num <= 5);
     48   while (num--)
     49     {
     50       kind = va_arg (va, enum aarch64_field_kind);
     51       field = &fields[kind];
     52       insert_field (kind, code, value, mask);
     53       value >>= field->width;
     54     }
     55   va_end (va);
     56 }
     57 
     58 /* Operand inserters.  */
     59 
     60 /* Insert register number.  */
     61 const char *
     62 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
     63 		   aarch64_insn *code,
     64 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
     65 {
     66   insert_field (self->fields[0], code, info->reg.regno, 0);
     67   return NULL;
     68 }
     69 
     70 /* Insert register number, index and/or other data for SIMD register element
     71    operand, e.g. the last source operand in
     72      SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
     73 const char *
     74 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
     75 		     aarch64_insn *code, const aarch64_inst *inst)
     76 {
     77   /* regno */
     78   insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
     79   /* index and/or type */
     80   if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
     81     {
     82       int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
     83       if (info->type == AARCH64_OPND_En
     84 	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
     85 	{
     86 	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
     87 	  assert (info->idx == 1);	/* Vn */
     88 	  aarch64_insn value = info->reglane.index << pos;
     89 	  insert_field (FLD_imm4, code, value, 0);
     90 	}
     91       else
     92 	{
     93 	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
     94 	     imm5<3:0>	<V>
     95 	     0000	RESERVED
     96 	     xxx1	B
     97 	     xx10	H
     98 	     x100	S
     99 	     1000	D  */
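                   /* E.g. DUP S0, V1.S[3]: pos = 2, so value becomes
                      ((3 << 1) | 1) << 2 = 0b11100, i.e. imm5 = x100 with the
                      index held in imm5<4:3>.  */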
    100 	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
    101 	  insert_field (FLD_imm5, code, value, 0);
    102 	}
    103     }
    104   else
    105     {
     106       /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
     107          or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].  */
    108       switch (info->qualifier)
    109 	{
    110 	case AARCH64_OPND_QLF_S_H:
    111 	  /* H:L:M */
    112 	  insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
    113 	  break;
    114 	case AARCH64_OPND_QLF_S_S:
    115 	  /* H:L */
    116 	  insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
    117 	  break;
    118 	case AARCH64_OPND_QLF_S_D:
    119 	  /* H */
    120 	  insert_field (FLD_H, code, info->reglane.index, 0);
    121 	  break;
    122 	default:
    123 	  assert (0);
    124 	}
    125     }
    126   return NULL;
    127 }
    128 
    129 /* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
    130 const char *
    131 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
    132 		     aarch64_insn *code,
    133 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
    134 {
    135   /* R */
    136   insert_field (self->fields[0], code, info->reglist.first_regno, 0);
    137   /* len */
    138   insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
    139   return NULL;
    140 }
    141 
    142 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
    143    in AdvSIMD load/store instructions.  */
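         /* E.g. LD1 {V0.8B, V1.8B}, [X0] loads one element per structure from
            a two-register list, so the opcode field below is set to 0xa.  */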
    144 const char *
    145 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
    146 			  const aarch64_opnd_info *info, aarch64_insn *code,
    147 			  const aarch64_inst *inst)
    148 {
    149   aarch64_insn value = 0;
    150   /* Number of elements in each structure to be loaded/stored.  */
    151   unsigned num = get_opcode_dependent_value (inst->opcode);
    152 
    153   /* Rt */
    154   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
    155   /* opcode */
    156   switch (num)
    157     {
    158     case 1:
    159       switch (info->reglist.num_regs)
    160 	{
    161 	case 1: value = 0x7; break;
    162 	case 2: value = 0xa; break;
    163 	case 3: value = 0x6; break;
    164 	case 4: value = 0x2; break;
    165 	default: assert (0);
    166 	}
    167       break;
    168     case 2:
    169       value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
    170       break;
    171     case 3:
    172       value = 0x4;
    173       break;
    174     case 4:
    175       value = 0x0;
    176       break;
    177     default:
    178       assert (0);
    179     }
    180   insert_field (FLD_opcode, code, value, 0);
    181 
    182   return NULL;
    183 }
    184 
    185 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
    186    single structure to all lanes instructions.  */
    187 const char *
    188 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
    189 			    const aarch64_opnd_info *info, aarch64_insn *code,
    190 			    const aarch64_inst *inst)
    191 {
    192   aarch64_insn value;
    193   /* The opcode dependent area stores the number of elements in
    194      each structure to be loaded/stored.  */
    195   int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
    196 
    197   /* Rt */
    198   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
    199   /* S */
    200   value = (aarch64_insn) 0;
    201   if (is_ld1r && info->reglist.num_regs == 2)
     202     /* OP_LD1R does not have an alternating variant, but has a "two
     203        consecutive" variant instead.  */
    204     value = (aarch64_insn) 1;
    205   insert_field (FLD_S, code, value, 0);
    206 
    207   return NULL;
    208 }
    209 
    210 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
    211    operand e.g. Vt in AdvSIMD load/store single element instructions.  */
    212 const char *
    213 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
    214 			   const aarch64_opnd_info *info, aarch64_insn *code,
    215 			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
    216 {
    217   aarch64_field field = {0, 0};
    218   aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
    219   aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */
    220 
    221   assert (info->reglist.has_index);
    222 
    223   /* Rt */
    224   insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
    225   /* Encode the index, opcode<2:1> and size.  */
    226   switch (info->qualifier)
    227     {
    228     case AARCH64_OPND_QLF_S_B:
    229       /* Index encoded in "Q:S:size".  */
    230       QSsize = info->reglist.index;
    231       opcodeh2 = 0x0;
    232       break;
    233     case AARCH64_OPND_QLF_S_H:
    234       /* Index encoded in "Q:S:size<1>".  */
    235       QSsize = info->reglist.index << 1;
    236       opcodeh2 = 0x1;
    237       break;
    238     case AARCH64_OPND_QLF_S_S:
    239       /* Index encoded in "Q:S".  */
    240       QSsize = info->reglist.index << 2;
    241       opcodeh2 = 0x2;
    242       break;
    243     case AARCH64_OPND_QLF_S_D:
    244       /* Index encoded in "Q".  */
    245       QSsize = info->reglist.index << 3 | 0x1;
    246       opcodeh2 = 0x2;
    247       break;
    248     default:
    249       assert (0);
    250     }
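           /* E.g. LD1 {V0.S}[1], [X0]: QSsize = 1 << 2, giving size = 00,
              S = 1 and Q = 0, with opcode<2:1> = 0x2.  */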
    251   insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
    252   gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
    253   insert_field_2 (&field, code, opcodeh2, 0);
    254 
    255   return NULL;
    256 }
    257 
    258 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
    259    SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
    260    or SSHR <V><d>, <V><n>, #<shift>.  */
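         /* E.g. SSHR V0.4H, V1.4H, #3 is encoded with Q = 0 and
            immh:immb = 32 - 3 = 29, i.e. immh = 0011 and immb = 101.  */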
    261 const char *
    262 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
    263 			       const aarch64_opnd_info *info,
    264 			       aarch64_insn *code, const aarch64_inst *inst)
    265 {
    266   unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
    267   aarch64_insn Q, imm;
    268 
    269   if (inst->opcode->iclass == asimdshf)
    270     {
    271       /* Q
    272 	 immh	Q	<T>
    273 	 0000	x	SEE AdvSIMD modified immediate
    274 	 0001	0	8B
    275 	 0001	1	16B
    276 	 001x	0	4H
    277 	 001x	1	8H
    278 	 01xx	0	2S
    279 	 01xx	1	4S
    280 	 1xxx	0	RESERVED
    281 	 1xxx	1	2D  */
    282       Q = (val & 0x1) ? 1 : 0;
    283       insert_field (FLD_Q, code, Q, inst->opcode->mask);
    284       val >>= 1;
    285     }
    286 
    287   assert (info->type == AARCH64_OPND_IMM_VLSR
    288 	  || info->type == AARCH64_OPND_IMM_VLSL);
    289 
    290   if (info->type == AARCH64_OPND_IMM_VLSR)
    291     /* immh:immb
    292        immh	<shift>
    293        0000	SEE AdvSIMD modified immediate
    294        0001	(16-UInt(immh:immb))
    295        001x	(32-UInt(immh:immb))
    296        01xx	(64-UInt(immh:immb))
    297        1xxx	(128-UInt(immh:immb))  */
    298     imm = (16 << (unsigned)val) - info->imm.value;
    299   else
    300     /* immh:immb
    301        immh	<shift>
    302        0000	SEE AdvSIMD modified immediate
    303        0001	(UInt(immh:immb)-8)
    304        001x	(UInt(immh:immb)-16)
    305        01xx	(UInt(immh:immb)-32)
    306        1xxx	(UInt(immh:immb)-64)  */
    307     imm = info->imm.value + (8 << (unsigned)val);
    308   insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
    309 
    310   return NULL;
    311 }
    312 
    313 /* Insert fields for e.g. the immediate operands in
    314    BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
    315 const char *
    316 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
    317 		 aarch64_insn *code,
    318 		 const aarch64_inst *inst ATTRIBUTE_UNUSED)
    319 {
    320   int64_t imm;
    321   /* Maximum of two fields to insert.  */
    322   assert (self->fields[2] == FLD_NIL);
    323 
    324   imm = info->imm.value;
    325   if (operand_need_shift_by_two (self))
    326     imm >>= 2;
    327   if (self->fields[1] == FLD_NIL)
    328     insert_field (self->fields[0], code, imm, 0);
    329   else
    330     /* e.g. TBZ b5:b40.  */
    331     insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);
    332   return NULL;
    333 }
    334 
    335 /* Insert immediate and its shift amount for e.g. the last operand in
    336      MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
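         /* E.g. MOVZ X0, #0x1234, LSL #16 is encoded with imm16 = 0x1234 and
            hw = 16 >> 4 = 1.  */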
    337 const char *
    338 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
    339 		      aarch64_insn *code, const aarch64_inst *inst)
    340 {
    341   /* imm16 */
    342   aarch64_ins_imm (self, info, code, inst);
    343   /* hw */
    344   insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
    345   return NULL;
    346 }
    347 
    348 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
    349      MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
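         /* E.g. MOVI V0.4S, #0x12, LSL #8: "a:b:c:d:e:f:g:h" holds 0x12 and
            the shift amount 8 >> 3 = 1 goes into cmode<2:1>.  */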
    350 const char *
    351 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
    352 				  const aarch64_opnd_info *info,
    353 				  aarch64_insn *code,
    354 				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
    355 {
    356   enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
    357   uint64_t imm = info->imm.value;
    358   enum aarch64_modifier_kind kind = info->shifter.kind;
    359   int amount = info->shifter.amount;
    360   aarch64_field field = {0, 0};
    361 
    362   /* a:b:c:d:e:f:g:h */
    363   if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    364     {
    365       /* Either MOVI <Dd>, #<imm>
    366 	 or     MOVI <Vd>.2D, #<imm>.
    367 	 <imm> is a 64-bit immediate
    368 	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
    369 	 encoded in "a:b:c:d:e:f:g:h".	*/
    370       imm = aarch64_shrink_expanded_imm8 (imm);
    371       assert ((int)imm >= 0);
    372     }
    373   insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
    374 
    375   if (kind == AARCH64_MOD_NONE)
    376     return NULL;
    377 
    378   /* shift amount partially in cmode */
    379   assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
    380   if (kind == AARCH64_MOD_LSL)
    381     {
    382       /* AARCH64_MOD_LSL: shift zeros.  */
    383       int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
    384       assert (esize == 4 || esize == 2 || esize == 1);
    385       /* For 8-bit move immediate, the optional LSL #0 does not require
    386 	 encoding.  */
    387       if (esize == 1)
    388 	return NULL;
    389       amount >>= 3;
    390       if (esize == 4)
    391 	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
    392       else
    393 	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    394     }
    395   else
    396     {
    397       /* AARCH64_MOD_MSL: shift ones.  */
    398       amount >>= 4;
    399       gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
    400     }
    401   insert_field_2 (&field, code, amount, 0);
    402 
    403   return NULL;
    404 }
    405 
    406 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
    407    e.g.  SCVTF <Dd>, <Wn>, #<fbits>.  */
    408 const char *
    409 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
    410 		   aarch64_insn *code,
    411 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
    412 {
    413   insert_field (self->fields[0], code, 64 - info->imm.value, 0);
    414   return NULL;
    415 }
    416 
    417 /* Insert arithmetic immediate for e.g. the last operand in
    418      SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
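         /* E.g. SUBS X0, X1, #20, LSL #12 sets the shift field to 1 and
            imm12 to 20.  */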
    419 const char *
    420 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
    421 		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
    422 {
    423   /* shift */
    424   aarch64_insn value = info->shifter.amount ? 1 : 0;
    425   insert_field (self->fields[0], code, value, 0);
    426   /* imm12 (unsigned) */
    427   insert_field (self->fields[1], code, info->imm.value, 0);
    428   return NULL;
    429 }
    430 
    431 /* Insert logical/bitmask immediate for e.g. the last operand in
    432      ORR <Wd|WSP>, <Wn>, #<imm>.  */
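         /* E.g. ORR W0, W1, #0xff is encoded with N = 0, immr = 0b000000 and
            imms = 0b000111 (a run of eight ones).  */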
    433 const char *
    434 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
    435 		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
    436 {
    437   aarch64_insn value;
    438   uint64_t imm = info->imm.value;
    439   int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;
    440 
    441   if (inst->opcode->op == OP_BIC)
    442     imm = ~imm;
    443   if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
    444     /* The constraint check should have guaranteed this wouldn't happen.  */
    445     assert (0);
    446 
    447   insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
    448 		 self->fields[0]);
    449   return NULL;
    450 }
    451 
    452 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
    453    or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
    454 const char *
    455 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
    456 		aarch64_insn *code, const aarch64_inst *inst)
    457 {
    458   aarch64_insn value = 0;
    459 
    460   assert (info->idx == 0);
    461 
    462   /* Rt */
    463   aarch64_ins_regno (self, info, code, inst);
    464   if (inst->opcode->iclass == ldstpair_indexed
    465       || inst->opcode->iclass == ldstnapair_offs
    466       || inst->opcode->iclass == ldstpair_off
    467       || inst->opcode->iclass == loadlit)
    468     {
    469       /* size */
    470       switch (info->qualifier)
    471 	{
    472 	case AARCH64_OPND_QLF_S_S: value = 0; break;
    473 	case AARCH64_OPND_QLF_S_D: value = 1; break;
    474 	case AARCH64_OPND_QLF_S_Q: value = 2; break;
    475 	default: assert (0);
    476 	}
    477       insert_field (FLD_ldst_size, code, value, 0);
    478     }
    479   else
    480     {
    481       /* opc[1]:size */
    482       value = aarch64_get_qualifier_standard_value (info->qualifier);
    483       insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    484     }
    485 
    486   return NULL;
    487 }
    488 
    489 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
    490 const char *
    491 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
    492 			 const aarch64_opnd_info *info, aarch64_insn *code,
    493 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
    494 {
    495   /* Rn */
    496   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
    497   return NULL;
    498 }
    499 
    500 /* Encode the address operand for e.g.
    501      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
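         /* E.g. LDR X0, [X1, W2, UXTW #3]: option = 0b010 (UXTW) and S = 1
            since the shift amount is non-zero.  */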
    502 const char *
    503 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
    504 			 const aarch64_opnd_info *info, aarch64_insn *code,
    505 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
    506 {
    507   aarch64_insn S;
    508   enum aarch64_modifier_kind kind = info->shifter.kind;
    509 
    510   /* Rn */
    511   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
    512   /* Rm */
    513   insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
    514   /* option */
    515   if (kind == AARCH64_MOD_LSL)
     516     kind = AARCH64_MOD_UXTX;	/* Trick to enable table-driven lookup.  */
    517   insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
    518   /* S */
    519   if (info->qualifier != AARCH64_OPND_QLF_S_B)
    520     S = info->shifter.amount != 0;
    521   else
     522     /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
    523        S	<amount>
    524        0	[absent]
    525        1	#0
    526        Must be #0 if <extend> is explicitly LSL.  */
    527     S = info->shifter.operator_present && info->shifter.amount_present;
    528   insert_field (FLD_S, code, S, 0);
    529 
    530   return NULL;
    531 }
    532 
    533 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
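         /* For the ld/st pair forms, e.g. LDP X0, X1, [SP, #16], the offset is
            scaled by the access size, so imm7 holds 16 >> 3 = 2.  */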
    534 const char *
    535 aarch64_ins_addr_simm (const aarch64_operand *self,
    536 		       const aarch64_opnd_info *info,
    537 		       aarch64_insn *code,
    538 		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
    539 {
    540   int imm;
    541 
    542   /* Rn */
    543   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
    544   /* simm (imm9 or imm7) */
    545   imm = info->addr.offset.imm;
    546   if (self->fields[0] == FLD_imm7)
     547     /* scaled immediate in ld/st pair instructions.  */
    548     imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
    549   insert_field (self->fields[0], code, imm, 0);
    550   /* pre/post- index */
    551   if (info->addr.writeback)
    552     {
    553       assert (inst->opcode->iclass != ldst_unscaled
    554 	      && inst->opcode->iclass != ldstnapair_offs
    555 	      && inst->opcode->iclass != ldstpair_off
    556 	      && inst->opcode->iclass != ldst_unpriv);
    557       assert (info->addr.preind != info->addr.postind);
    558       if (info->addr.preind)
    559 	insert_field (self->fields[1], code, 1, 0);
    560     }
    561 
    562   return NULL;
    563 }
    564 
    565 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
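         /* E.g. LDR X0, [X1, #24]: the offset is scaled by the 8-byte access
            size, so uimm12 holds 24 >> 3 = 3.  */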
    566 const char *
    567 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
    568 			 const aarch64_opnd_info *info,
    569 			 aarch64_insn *code,
    570 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
    571 {
    572   int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
    573 
    574   /* Rn */
    575   insert_field (self->fields[0], code, info->addr.base_regno, 0);
    576   /* uimm12 */
     577   insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
    578   return NULL;
    579 }
    580 
    581 /* Encode the address operand for e.g.
    582      LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
    583 const char *
    584 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
    585 			    const aarch64_opnd_info *info, aarch64_insn *code,
    586 			    const aarch64_inst *inst ATTRIBUTE_UNUSED)
    587 {
    588   /* Rn */
    589   insert_field (FLD_Rn, code, info->addr.base_regno, 0);
    590   /* Rm | #<amount>  */
    591   if (info->addr.offset.is_reg)
    592     insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
    593   else
    594     insert_field (FLD_Rm, code, 0x1f, 0);
    595   return NULL;
    596 }
    597 
    598 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
    599 const char *
    600 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
    601 		  const aarch64_opnd_info *info, aarch64_insn *code,
    602 		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
    603 {
    604   /* cond */
    605   insert_field (FLD_cond, code, info->cond->value, 0);
    606   return NULL;
    607 }
    608 
    609 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
    610 const char *
    611 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
    612 		    const aarch64_opnd_info *info, aarch64_insn *code,
    613 		    const aarch64_inst *inst ATTRIBUTE_UNUSED)
    614 {
    615   /* op0:op1:CRn:CRm:op2 */
    616   insert_fields (code, info->sysreg, inst->opcode->mask, 5,
    617 		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
    618   return NULL;
    619 }
    620 
    621 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
    622 const char *
    623 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
    624 			 const aarch64_opnd_info *info, aarch64_insn *code,
    625 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
    626 {
    627   /* op1:op2 */
    628   insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
    629 		 FLD_op2, FLD_op1);
    630   return NULL;
    631 }
    632 
    633 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
    634 const char *
    635 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
    636 		       const aarch64_opnd_info *info, aarch64_insn *code,
    637 		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
    638 {
    639   /* op1:CRn:CRm:op2 */
    640   insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
    641 		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
    642   return NULL;
    643 }
    644 
    645 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
    646 
    647 const char *
    648 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
    649 		     const aarch64_opnd_info *info, aarch64_insn *code,
    650 		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
    651 {
    652   /* CRm */
    653   insert_field (FLD_CRm, code, info->barrier->value, 0);
    654   return NULL;
    655 }
    656 
    657 /* Encode the prefetch operation option operand for e.g.
    658      PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
    659 
    660 const char *
    661 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
    662 		   const aarch64_opnd_info *info, aarch64_insn *code,
    663 		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
    664 {
    665   /* prfop in Rt */
    666   insert_field (FLD_Rt, code, info->prfop->value, 0);
    667   return NULL;
    668 }
    669 
    670 /* Encode the hint number for instructions that alias HINT but take an
    671    operand.  */
    672 
    673 const char *
    674 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
    675 		  const aarch64_opnd_info *info, aarch64_insn *code,
    676 		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
    677 {
    678   /* CRm:op2.  */
    679   insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
    680   return NULL;
    681 }
    682 
    683 /* Encode the extended register operand for e.g.
    684      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
    685 const char *
    686 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
    687 			  const aarch64_opnd_info *info, aarch64_insn *code,
    688 			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
    689 {
    690   enum aarch64_modifier_kind kind;
    691 
    692   /* Rm */
    693   insert_field (FLD_Rm, code, info->reg.regno, 0);
    694   /* option */
    695   kind = info->shifter.kind;
    696   if (kind == AARCH64_MOD_LSL)
    697     kind = info->qualifier == AARCH64_OPND_QLF_W
    698       ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
    699   insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
    700   /* imm3 */
    701   insert_field (FLD_imm3, code, info->shifter.amount, 0);
    702 
    703   return NULL;
    704 }
    705 
    706 /* Encode the shifted register operand for e.g.
    707      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
    708 const char *
    709 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
    710 			 const aarch64_opnd_info *info, aarch64_insn *code,
    711 			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
    712 {
    713   /* Rm */
    714   insert_field (FLD_Rm, code, info->reg.regno, 0);
    715   /* shift */
    716   insert_field (FLD_shift, code,
    717 		aarch64_get_operand_modifier_value (info->shifter.kind), 0);
    718   /* imm6 */
    719   insert_field (FLD_imm6, code, info->shifter.amount, 0);
    720 
    721   return NULL;
    722 }
    723 
    724 /* Miscellaneous encoding functions.  */
    725 
    726 /* Encode size[0], i.e. bit 22, for
    727      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
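         /* E.g. FCVTN V0.4H, V1.4S has a 4S source, so bit 22 is set to 0;
            FCVTN2 V0.4S, V1.2D sets it to 1.  */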
    728 
    729 static void
    730 encode_asimd_fcvt (aarch64_inst *inst)
    731 {
    732   aarch64_insn value;
    733   aarch64_field field = {0, 0};
    734   enum aarch64_opnd_qualifier qualifier;
    735 
    736   switch (inst->opcode->op)
    737     {
    738     case OP_FCVTN:
    739     case OP_FCVTN2:
    740       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
    741       qualifier = inst->operands[1].qualifier;
    742       break;
    743     case OP_FCVTL:
    744     case OP_FCVTL2:
    745       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
    746       qualifier = inst->operands[0].qualifier;
    747       break;
    748     default:
    749       assert (0);
    750     }
    751   assert (qualifier == AARCH64_OPND_QLF_V_4S
    752 	  || qualifier == AARCH64_OPND_QLF_V_2D);
    753   value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
    754   gen_sub_field (FLD_size, 0, 1, &field);
    755   insert_field_2 (&field, &inst->value, value, 0);
    756 }
    757 
    758 /* Encode size[0], i.e. bit 22, for
    759      e.g. FCVTXN <Vb><d>, <Va><n>.  */
    760 
    761 static void
    762 encode_asisd_fcvtxn (aarch64_inst *inst)
    763 {
    764   aarch64_insn val = 1;
    765   aarch64_field field = {0, 0};
    766   assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
    767   gen_sub_field (FLD_size, 0, 1, &field);
    768   insert_field_2 (&field, &inst->value, val, 0);
    769 }
    770 
    771 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
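         /* E.g. FCVT D0, S1 has a double-precision destination, so opc
            (bits 16:15) is set to 1.  */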
    772 static void
    773 encode_fcvt (aarch64_inst *inst)
    774 {
    775   aarch64_insn val;
    776   const aarch64_field field = {15, 2};
    777 
    778   /* opc dstsize */
    779   switch (inst->operands[0].qualifier)
    780     {
    781     case AARCH64_OPND_QLF_S_S: val = 0; break;
    782     case AARCH64_OPND_QLF_S_D: val = 1; break;
    783     case AARCH64_OPND_QLF_S_H: val = 3; break;
    784     default: abort ();
    785     }
    786   insert_field_2 (&field, &inst->value, val, 0);
    787 
    788   return;
    789 }
    790 
    791 /* Do miscellaneous encodings that are not common enough to be driven by
    792    flags.  */
    793 
    794 static void
    795 do_misc_encoding (aarch64_inst *inst)
    796 {
    797   switch (inst->opcode->op)
    798     {
    799     case OP_FCVT:
    800       encode_fcvt (inst);
    801       break;
    802     case OP_FCVTN:
    803     case OP_FCVTN2:
    804     case OP_FCVTL:
    805     case OP_FCVTL2:
    806       encode_asimd_fcvt (inst);
    807       break;
    808     case OP_FCVTXN_S:
    809       encode_asisd_fcvtxn (inst);
    810       break;
    811     default: break;
    812     }
    813 }
    814 
    815 /* Encode the 'size' and 'Q' field for e.g. SHADD.  */
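         /* E.g. SHADD V0.8H, V1.8H, V2.8H: the 8H qualifier gives Q = 1 and
            size = 01.  */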
    816 static void
    817 encode_sizeq (aarch64_inst *inst)
    818 {
    819   aarch64_insn sizeq;
    820   enum aarch64_field_kind kind;
    821   int idx;
    822 
    823   /* Get the index of the operand whose information we are going to use
    824      to encode the size and Q fields.
    825      This is deduced from the possible valid qualifier lists.  */
    826   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
    827   DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
    828 	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
    829   sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
    830   /* Q */
    831   insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
    832   /* size */
    833   if (inst->opcode->iclass == asisdlse
    834      || inst->opcode->iclass == asisdlsep
    835      || inst->opcode->iclass == asisdlso
    836      || inst->opcode->iclass == asisdlsop)
    837     kind = FLD_vldst_size;
    838   else
    839     kind = FLD_size;
    840   insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
    841 }
    842 
     843 /* Opcodes that have fields shared by multiple operands are usually flagged
     844    accordingly.  In this function, we detect such flags and use the
     845    information in one of the related operands to do the encoding.  The 'one'
     846    operand is not an arbitrary operand, but one of the operands that has
     847    enough information for such an encoding.  */
    848 
    849 static void
    850 do_special_encoding (struct aarch64_inst *inst)
    851 {
    852   int idx;
    853   aarch64_insn value = 0;
    854 
    855   DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
    856 
     857   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
    858   if (inst->opcode->flags & F_COND)
    859     {
    860       insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    861     }
    862   if (inst->opcode->flags & F_SF)
    863     {
    864       idx = select_operand_for_sf_field_coding (inst->opcode);
    865       value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
    866 	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
    867 	? 1 : 0;
    868       insert_field (FLD_sf, &inst->value, value, 0);
    869       if (inst->opcode->flags & F_N)
    870 	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    871     }
    872   if (inst->opcode->flags & F_LSE_SZ)
    873     {
    874       idx = select_operand_for_sf_field_coding (inst->opcode);
    875       value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
    876 	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
    877 	? 1 : 0;
    878       insert_field (FLD_lse_sz, &inst->value, value, 0);
    879     }
    880   if (inst->opcode->flags & F_SIZEQ)
    881     encode_sizeq (inst);
    882   if (inst->opcode->flags & F_FPTYPE)
    883     {
    884       idx = select_operand_for_fptype_field_coding (inst->opcode);
    885       switch (inst->operands[idx].qualifier)
    886 	{
    887 	case AARCH64_OPND_QLF_S_S: value = 0; break;
    888 	case AARCH64_OPND_QLF_S_D: value = 1; break;
    889 	case AARCH64_OPND_QLF_S_H: value = 3; break;
    890 	default: assert (0);
    891 	}
    892       insert_field (FLD_type, &inst->value, value, 0);
    893     }
    894   if (inst->opcode->flags & F_SSIZE)
    895     {
    896       enum aarch64_opnd_qualifier qualifier;
    897       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
    898       qualifier = inst->operands[idx].qualifier;
    899       assert (qualifier >= AARCH64_OPND_QLF_S_B
    900 	      && qualifier <= AARCH64_OPND_QLF_S_Q);
    901       value = aarch64_get_qualifier_standard_value (qualifier);
    902       insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    903     }
    904   if (inst->opcode->flags & F_T)
    905     {
    906       int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
    907       aarch64_field field = {0, 0};
    908       enum aarch64_opnd_qualifier qualifier;
    909 
    910       idx = 0;
    911       qualifier = inst->operands[idx].qualifier;
    912       assert (aarch64_get_operand_class (inst->opcode->operands[0])
    913 	      == AARCH64_OPND_CLASS_SIMD_REG
    914 	      && qualifier >= AARCH64_OPND_QLF_V_8B
    915 	      && qualifier <= AARCH64_OPND_QLF_V_2D);
    916       /* imm5<3:0>	q	<t>
    917 	 0000		x	reserved
    918 	 xxx1		0	8b
    919 	 xxx1		1	16b
    920 	 xx10		0	4h
    921 	 xx10		1	8h
    922 	 x100		0	2s
    923 	 x100		1	4s
    924 	 1000		0	reserved
    925 	 1000		1	2d  */
    926       value = aarch64_get_qualifier_standard_value (qualifier);
    927       insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
    928       num = (int) value >> 1;
    929       assert (num >= 0 && num <= 3);
    930       gen_sub_field (FLD_imm5, 0, num + 1, &field);
    931       insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    932     }
    933   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    934     {
    935       /* Use Rt to encode in the case of e.g.
    936 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
    937       enum aarch64_opnd_qualifier qualifier;
    938       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
    939       if (idx == -1)
     940 	/* Otherwise use the result operand, which has to be an integer
    941 	   register.  */
    942 	idx = 0;
    943       assert (idx == 0 || idx == 1);
    944       assert (aarch64_get_operand_class (inst->opcode->operands[idx])
    945 	      == AARCH64_OPND_CLASS_INT_REG);
    946       qualifier = inst->operands[idx].qualifier;
    947       insert_field (FLD_Q, &inst->value,
    948 		    aarch64_get_qualifier_standard_value (qualifier), 0);
    949     }
    950   if (inst->opcode->flags & F_LDS_SIZE)
    951     {
    952       /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
    953       enum aarch64_opnd_qualifier qualifier;
    954       aarch64_field field = {0, 0};
    955       assert (aarch64_get_operand_class (inst->opcode->operands[0])
    956 	      == AARCH64_OPND_CLASS_INT_REG);
    957       gen_sub_field (FLD_opc, 0, 1, &field);
    958       qualifier = inst->operands[0].qualifier;
    959       insert_field_2 (&field, &inst->value,
    960 		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    961     }
    962   /* Miscellaneous encoding as the last step.  */
    963   if (inst->opcode->flags & F_MISC)
    964     do_misc_encoding (inst);
    965 
    966   DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
    967 }
    968 
    969 /* Converters converting an alias opcode instruction to its real form.  */
    970 
    971 /* ROR <Wd>, <Ws>, #<shift>
    972      is equivalent to:
    973    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
    974 static void
    975 convert_ror_to_extr (aarch64_inst *inst)
    976 {
    977   copy_operand_info (inst, 3, 2);
    978   copy_operand_info (inst, 2, 1);
    979 }
    980 
    981 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
    982      is equivalent to:
    983    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
    984 static void
    985 convert_xtl_to_shll (aarch64_inst *inst)
    986 {
    987   inst->operands[2].qualifier = inst->operands[1].qualifier;
    988   inst->operands[2].imm.value = 0;
    989 }
    990 
    991 /* Convert
    992      LSR <Xd>, <Xn>, #<shift>
    993    to
    994      UBFM <Xd>, <Xn>, #<shift>, #63.  */
    995 static void
    996 convert_sr_to_bfm (aarch64_inst *inst)
    997 {
    998   inst->operands[3].imm.value =
    999     inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
   1000 }
   1001 
   1002 /* Convert MOV to ORR.  */
   1003 static void
   1004 convert_mov_to_orr (aarch64_inst *inst)
   1005 {
   1006   /* MOV <Vd>.<T>, <Vn>.<T>
   1007      is equivalent to:
   1008      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
   1009   copy_operand_info (inst, 2, 1);
   1010 }
   1011 
   1012 /* When <imms> >= <immr>, the instruction written:
   1013      SBFX <Xd>, <Xn>, #<lsb>, #<width>
   1014    is equivalent to:
   1015      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
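         /* E.g. SBFX X0, X1, #8, #4 is assembled as SBFM X0, X1, #8, #11.  */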
   1016 
   1017 static void
   1018 convert_bfx_to_bfm (aarch64_inst *inst)
   1019 {
   1020   int64_t lsb, width;
   1021 
   1022   /* Convert the operand.  */
   1023   lsb = inst->operands[2].imm.value;
   1024   width = inst->operands[3].imm.value;
   1025   inst->operands[2].imm.value = lsb;
   1026   inst->operands[3].imm.value = lsb + width - 1;
   1027 }
   1028 
   1029 /* When <imms> < <immr>, the instruction written:
   1030      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   1031    is equivalent to:
   1032      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
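         /* E.g. SBFIZ W0, W1, #8, #4 is assembled as SBFM W0, W1, #24, #3.  */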
   1033 
   1034 static void
   1035 convert_bfi_to_bfm (aarch64_inst *inst)
   1036 {
   1037   int64_t lsb, width;
   1038 
   1039   /* Convert the operand.  */
   1040   lsb = inst->operands[2].imm.value;
   1041   width = inst->operands[3].imm.value;
   1042   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
   1043     {
   1044       inst->operands[2].imm.value = (32 - lsb) & 0x1f;
   1045       inst->operands[3].imm.value = width - 1;
   1046     }
   1047   else
   1048     {
   1049       inst->operands[2].imm.value = (64 - lsb) & 0x3f;
   1050       inst->operands[3].imm.value = width - 1;
   1051     }
   1052 }
   1053 
   1054 /* The instruction written:
   1055      BFC <Xd>, #<lsb>, #<width>
   1056    is equivalent to:
   1057      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
   1058 
   1059 static void
   1060 convert_bfc_to_bfm (aarch64_inst *inst)
   1061 {
   1062   int64_t lsb, width;
   1063 
   1064   /* Insert XZR.  */
   1065   copy_operand_info (inst, 3, 2);
   1066   copy_operand_info (inst, 2, 1);
   1067   copy_operand_info (inst, 2, 0);
   1068   inst->operands[1].reg.regno = 0x1f;
   1069 
    1070   /* Convert the immediate operand.  */
   1071   lsb = inst->operands[2].imm.value;
   1072   width = inst->operands[3].imm.value;
   1073   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
   1074     {
   1075       inst->operands[2].imm.value = (32 - lsb) & 0x1f;
   1076       inst->operands[3].imm.value = width - 1;
   1077     }
   1078   else
   1079     {
   1080       inst->operands[2].imm.value = (64 - lsb) & 0x3f;
   1081       inst->operands[3].imm.value = width - 1;
   1082     }
   1083 }
   1084 
   1085 /* The instruction written:
   1086      LSL <Xd>, <Xn>, #<shift>
   1087    is equivalent to:
   1088      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
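         /* E.g. LSL X0, X1, #3 is assembled as UBFM X0, X1, #61, #60.  */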
   1089 
   1090 static void
   1091 convert_lsl_to_ubfm (aarch64_inst *inst)
   1092 {
   1093   int64_t shift = inst->operands[2].imm.value;
   1094 
   1095   if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
   1096     {
   1097       inst->operands[2].imm.value = (32 - shift) & 0x1f;
   1098       inst->operands[3].imm.value = 31 - shift;
   1099     }
   1100   else
   1101     {
   1102       inst->operands[2].imm.value = (64 - shift) & 0x3f;
   1103       inst->operands[3].imm.value = 63 - shift;
   1104     }
   1105 }
   1106 
   1107 /* CINC <Wd>, <Wn>, <cond>
   1108      is equivalent to:
   1109    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
   1110 
   1111 static void
   1112 convert_to_csel (aarch64_inst *inst)
   1113 {
   1114   copy_operand_info (inst, 3, 2);
   1115   copy_operand_info (inst, 2, 1);
   1116   inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
   1117 }
   1118 
   1119 /* CSET <Wd>, <cond>
   1120      is equivalent to:
   1121    CSINC <Wd>, WZR, WZR, invert(<cond>).  */
   1122 
   1123 static void
   1124 convert_cset_to_csinc (aarch64_inst *inst)
   1125 {
   1126   copy_operand_info (inst, 3, 1);
   1127   copy_operand_info (inst, 2, 0);
   1128   copy_operand_info (inst, 1, 0);
   1129   inst->operands[1].reg.regno = 0x1f;
   1130   inst->operands[2].reg.regno = 0x1f;
   1131   inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
   1132 }
   1133 
   1134 /* MOV <Wd>, #<imm>
    1135      is equivalent to:
   1136    MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
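         /* E.g. MOV X0, #0x30000 is assembled as MOVZ X0, #0x3, LSL #16.  */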
   1137 
   1138 static void
   1139 convert_mov_to_movewide (aarch64_inst *inst)
   1140 {
   1141   int is32;
   1142   uint32_t shift_amount;
   1143   uint64_t value;
   1144 
   1145   switch (inst->opcode->op)
   1146     {
   1147     case OP_MOV_IMM_WIDE:
   1148       value = inst->operands[1].imm.value;
   1149       break;
   1150     case OP_MOV_IMM_WIDEN:
   1151       value = ~inst->operands[1].imm.value;
   1152       break;
   1153     default:
   1154       assert (0);
   1155     }
   1156   inst->operands[1].type = AARCH64_OPND_HALF;
   1157   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
   1158   if (! aarch64_wide_constant_p (value, is32, &shift_amount))
   1159     /* The constraint check should have guaranteed this wouldn't happen.  */
   1160     assert (0);
   1161   value >>= shift_amount;
   1162   value &= 0xffff;
   1163   inst->operands[1].imm.value = value;
   1164   inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
   1165   inst->operands[1].shifter.amount = shift_amount;
   1166 }
   1167 
   1168 /* MOV <Wd>, #<imm>
   1169      is equivalent to:
   1170    ORR <Wd>, WZR, #<imm>.  */
   1171 
   1172 static void
   1173 convert_mov_to_movebitmask (aarch64_inst *inst)
   1174 {
   1175   copy_operand_info (inst, 2, 1);
   1176   inst->operands[1].reg.regno = 0x1f;
   1177   inst->operands[1].skip = 0;
   1178 }
   1179 
   1180 /* Some alias opcodes are assembled by being converted to their real-form.  */
   1181 
   1182 static void
   1183 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
   1184 {
   1185   const aarch64_opcode *alias = inst->opcode;
   1186 
   1187   if ((alias->flags & F_CONV) == 0)
   1188     goto convert_to_real_return;
   1189 
   1190   switch (alias->op)
   1191     {
   1192     case OP_ASR_IMM:
   1193     case OP_LSR_IMM:
   1194       convert_sr_to_bfm (inst);
   1195       break;
   1196     case OP_LSL_IMM:
   1197       convert_lsl_to_ubfm (inst);
   1198       break;
   1199     case OP_CINC:
   1200     case OP_CINV:
   1201     case OP_CNEG:
   1202       convert_to_csel (inst);
   1203       break;
   1204     case OP_CSET:
   1205     case OP_CSETM:
   1206       convert_cset_to_csinc (inst);
   1207       break;
   1208     case OP_UBFX:
   1209     case OP_BFXIL:
   1210     case OP_SBFX:
   1211       convert_bfx_to_bfm (inst);
   1212       break;
   1213     case OP_SBFIZ:
   1214     case OP_BFI:
   1215     case OP_UBFIZ:
   1216       convert_bfi_to_bfm (inst);
   1217       break;
   1218     case OP_BFC:
   1219       convert_bfc_to_bfm (inst);
   1220       break;
   1221     case OP_MOV_V:
   1222       convert_mov_to_orr (inst);
   1223       break;
   1224     case OP_MOV_IMM_WIDE:
   1225     case OP_MOV_IMM_WIDEN:
   1226       convert_mov_to_movewide (inst);
   1227       break;
   1228     case OP_MOV_IMM_LOG:
   1229       convert_mov_to_movebitmask (inst);
   1230       break;
   1231     case OP_ROR_IMM:
   1232       convert_ror_to_extr (inst);
   1233       break;
   1234     case OP_SXTL:
   1235     case OP_SXTL2:
   1236     case OP_UXTL:
   1237     case OP_UXTL2:
   1238       convert_xtl_to_shll (inst);
   1239       break;
   1240     default:
   1241       break;
   1242     }
   1243 
   1244 convert_to_real_return:
   1245   aarch64_replace_opcode (inst, real);
   1246 }
   1247 
    1248 /* Encode *INST_ORI of the opcode OPCODE.
   1249    Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   1250    matched operand qualifier sequence in *QLF_SEQ.  */
   1251 
   1252 int
   1253 aarch64_opcode_encode (const aarch64_opcode *opcode,
   1254 		       const aarch64_inst *inst_ori, aarch64_insn *code,
   1255 		       aarch64_opnd_qualifier_t *qlf_seq,
   1256 		       aarch64_operand_error *mismatch_detail)
   1257 {
   1258   int i;
   1259   const aarch64_opcode *aliased;
   1260   aarch64_inst copy, *inst;
   1261 
   1262   DEBUG_TRACE ("enter with %s", opcode->name);
   1263 
   1264   /* Create a copy of *INST_ORI, so that we can do any change we want.  */
   1265   copy = *inst_ori;
   1266   inst = &copy;
   1267 
   1268   assert (inst->opcode == NULL || inst->opcode == opcode);
   1269   if (inst->opcode == NULL)
   1270     inst->opcode = opcode;
   1271 
   1272   /* Constrain the operands.
   1273      After passing this, the encoding is guaranteed to succeed.  */
   1274   if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
   1275     {
   1276       DEBUG_TRACE ("FAIL since operand constraint not met");
   1277       return 0;
   1278     }
   1279 
   1280   /* Get the base value.
   1281      Note: this has to be before the aliasing handling below in order to
   1282      get the base value from the alias opcode before we move on to the
   1283      aliased opcode for encoding.  */
   1284   inst->value = opcode->opcode;
   1285 
   1286   /* No need to do anything else if the opcode does not have any operand.  */
   1287   if (aarch64_num_of_operands (opcode) == 0)
   1288     goto encoding_exit;
   1289 
   1290   /* Assign operand indexes and check types.  Also put the matched
   1291      operand qualifiers in *QLF_SEQ to return.  */
   1292   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
   1293     {
   1294       assert (opcode->operands[i] == inst->operands[i].type);
   1295       inst->operands[i].idx = i;
   1296       if (qlf_seq != NULL)
   1297 	*qlf_seq = inst->operands[i].qualifier;
   1298     }
   1299 
   1300   aliased = aarch64_find_real_opcode (opcode);
   1301   /* If the opcode is an alias and it does not ask for direct encoding by
   1302      itself, the instruction will be transformed to the form of real opcode
    1303      itself, the instruction will be transformed to the form of the real opcode
   1304      opcode.  */
   1305   if (aliased != NULL && (opcode->flags & F_CONV))
   1306     {
    1307       DEBUG_TRACE ("real opcode '%s' has been found for the alias '%s'",
   1308 		   aliased->name, opcode->name);
   1309       /* Convert the operands to the form of the real opcode.  */
   1310       convert_to_real (inst, aliased);
   1311       opcode = aliased;
   1312     }
   1313 
   1314   aarch64_opnd_info *info = inst->operands;
   1315 
   1316   /* Call the inserter of each operand.  */
   1317   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
   1318     {
   1319       const aarch64_operand *opnd;
   1320       enum aarch64_opnd type = opcode->operands[i];
   1321       if (type == AARCH64_OPND_NIL)
   1322 	break;
   1323       if (info->skip)
   1324 	{
   1325 	  DEBUG_TRACE ("skip the incomplete operand %d", i);
   1326 	  continue;
   1327 	}
   1328       opnd = &aarch64_operands[type];
   1329       if (operand_has_inserter (opnd))
   1330 	aarch64_insert_operand (opnd, info, &inst->value, inst);
   1331     }
   1332 
   1333   /* Call opcode encoders indicated by flags.  */
   1334   if (opcode_has_special_coder (opcode))
   1335     do_special_encoding (inst);
   1336 
   1337 encoding_exit:
   1338   DEBUG_TRACE ("exit with %s", opcode->name);
   1339 
   1340   *code = inst->value;
   1341 
   1342   return 1;
   1343 }
   1344