      1 /* tc-arm.c -- Assemble for the ARM
      2    Copyright (C) 1994-2016 Free Software Foundation, Inc.
       3    Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
       4 	Modified by David Taylor (dtaylor@armltd.co.uk)
       5 	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
       6 	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
       7 	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
      8 
      9    This file is part of GAS, the GNU Assembler.
     10 
     11    GAS is free software; you can redistribute it and/or modify
     12    it under the terms of the GNU General Public License as published by
     13    the Free Software Foundation; either version 3, or (at your option)
     14    any later version.
     15 
     16    GAS is distributed in the hope that it will be useful,
     17    but WITHOUT ANY WARRANTY; without even the implied warranty of
     18    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
     19    GNU General Public License for more details.
     20 
     21    You should have received a copy of the GNU General Public License
     22    along with GAS; see the file COPYING.  If not, write to the Free
     23    Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
     24    02110-1301, USA.  */
     25 
     26 #include "as.h"
     27 #include <limits.h>
     28 #include <stdarg.h>
     29 #define	 NO_RELOC 0
     30 #include "safe-ctype.h"
     31 #include "subsegs.h"
     32 #include "obstack.h"
     33 #include "libiberty.h"
     34 #include "opcode/arm.h"
     35 
     36 #ifdef OBJ_ELF
     37 #include "elf/arm.h"
     38 #include "dw2gencfi.h"
     39 #endif
     40 
     41 #include "dwarf2dbg.h"
     42 
     43 #ifdef OBJ_ELF
     44 /* Must be at least the size of the largest unwind opcode (currently two).  */
     45 #define ARM_OPCODE_CHUNK_SIZE 8
     46 
     47 /* This structure holds the unwinding state.  */
     48 
     49 static struct
     50 {
     51   symbolS *	  proc_start;
     52   symbolS *	  table_entry;
     53   symbolS *	  personality_routine;
     54   int		  personality_index;
     55   /* The segment containing the function.  */
     56   segT		  saved_seg;
     57   subsegT	  saved_subseg;
     58   /* Opcodes generated from this function.  */
     59   unsigned char * opcodes;
     60   int		  opcode_count;
     61   int		  opcode_alloc;
     62   /* The number of bytes pushed to the stack.  */
     63   offsetT	  frame_size;
     64   /* We don't add stack adjustment opcodes immediately so that we can merge
     65      multiple adjustments.  We can also omit the final adjustment
     66      when using a frame pointer.  */
     67   offsetT	  pending_offset;
     68   /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     69      hold the reg+offset to use when restoring sp from a frame pointer.	 */
     70   offsetT	  fp_offset;
     71   int		  fp_reg;
     72   /* Nonzero if an unwind_setfp directive has been seen.  */
     73   unsigned	  fp_used:1;
     74   /* Nonzero if the last opcode restores sp from fp_reg.  */
     75   unsigned	  sp_restored:1;
     76 } unwind;
     77 
     78 #endif /* OBJ_ELF */
     79 
     80 /* Results from operand parsing worker functions.  */
     81 
     82 typedef enum
     83 {
     84   PARSE_OPERAND_SUCCESS,
     85   PARSE_OPERAND_FAIL,
     86   PARSE_OPERAND_FAIL_NO_BACKTRACK
     87 } parse_operand_result;
     88 
     89 enum arm_float_abi
     90 {
     91   ARM_FLOAT_ABI_HARD,
     92   ARM_FLOAT_ABI_SOFTFP,
     93   ARM_FLOAT_ABI_SOFT
     94 };
     95 
     96 /* Types of processor to assemble for.	*/
     97 #ifndef CPU_DEFAULT
     98 /* The code that was here used to select a default CPU depending on compiler
     99    pre-defines which were only present when doing native builds, thus
    100    changing gas' default behaviour depending upon the build host.
    101 
     102    If you have a target that requires a default CPU option then you
    103    should define CPU_DEFAULT here.  */
    104 #endif
    105 
    106 #ifndef FPU_DEFAULT
    107 # ifdef TE_LINUX
    108 #  define FPU_DEFAULT FPU_ARCH_FPA
    109 # elif defined (TE_NetBSD)
    110 #  ifdef OBJ_ELF
    111 #   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
    112 #  else
    113     /* Legacy a.out format.  */
    114 #   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
    115 #  endif
    116 # elif defined (TE_VXWORKS)
    117 #  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
    118 # else
    119    /* For backwards compatibility, default to FPA.  */
    120 #  define FPU_DEFAULT FPU_ARCH_FPA
    121 # endif
    122 #endif /* ifndef FPU_DEFAULT */
    123 
    124 #define streq(a, b)	      (strcmp (a, b) == 0)
    125 
    126 static arm_feature_set cpu_variant;
    127 static arm_feature_set arm_arch_used;
    128 static arm_feature_set thumb_arch_used;
    129 
    130 /* Flags stored in private area of BFD structure.  */
    131 static int uses_apcs_26	     = FALSE;
    132 static int atpcs	     = FALSE;
    133 static int support_interwork = FALSE;
    134 static int uses_apcs_float   = FALSE;
    135 static int pic_code	     = FALSE;
    136 static int fix_v4bx	     = FALSE;
    137 /* Warn on using deprecated features.  */
    138 static int warn_on_deprecated = TRUE;
    139 
    140 /* Understand CodeComposer Studio assembly syntax.  */
    141 bfd_boolean codecomposer_syntax = FALSE;
    142 
    143 /* Variables that we set while parsing command-line options.  Once all
    144    options have been read we re-process these values to set the real
    145    assembly flags.  */
    146 static const arm_feature_set *legacy_cpu = NULL;
    147 static const arm_feature_set *legacy_fpu = NULL;
    148 
    149 static const arm_feature_set *mcpu_cpu_opt = NULL;
    150 static const arm_feature_set *mcpu_fpu_opt = NULL;
    151 static const arm_feature_set *march_cpu_opt = NULL;
    152 static const arm_feature_set *march_fpu_opt = NULL;
    153 static const arm_feature_set *mfpu_opt = NULL;
    154 static const arm_feature_set *object_arch = NULL;
    155 
    156 /* Constants for known architecture features.  */
    157 static const arm_feature_set fpu_default = FPU_DEFAULT;
    158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
    159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
    160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
    161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
    162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
    163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
    164 #ifdef OBJ_ELF
    165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
    166 #endif
    167 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
    168 
    169 #ifdef CPU_DEFAULT
    170 static const arm_feature_set cpu_default = CPU_DEFAULT;
    171 #endif
    172 
    173 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
     174 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
    175 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
    176 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
    177 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
    178 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
    179 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
    180 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
    181 static const arm_feature_set arm_ext_v4t_5 =
    182   ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
    183 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
    184 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
    185 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
    186 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
    187 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
    188 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
    189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
    190 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
    191 static const arm_feature_set arm_ext_v6_notm =
    192   ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
    193 static const arm_feature_set arm_ext_v6_dsp =
    194   ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
    195 static const arm_feature_set arm_ext_barrier =
    196   ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
    197 static const arm_feature_set arm_ext_msr =
    198   ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
    199 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
    200 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
    201 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
    202 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
    203 #ifdef OBJ_ELF
    204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
    205 #endif
    206 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
    207 static const arm_feature_set arm_ext_m =
    208   ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
    209 		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
    210 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
    211 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
    212 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
    213 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
    214 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
    215 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
    216 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
    217 static const arm_feature_set arm_ext_v8m_main =
    218   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
    219 /* Instructions in ARMv8-M only found in M profile architectures.  */
    220 static const arm_feature_set arm_ext_v8m_m_only =
    221   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
    222 static const arm_feature_set arm_ext_v6t2_v8m =
    223   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
    224 /* Instructions shared between ARMv8-A and ARMv8-M.  */
    225 static const arm_feature_set arm_ext_atomics =
    226   ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
    227 #ifdef OBJ_ELF
    228 /* DSP instructions Tag_DSP_extension refers to.  */
    229 static const arm_feature_set arm_ext_dsp =
    230   ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
    231 #endif
    232 static const arm_feature_set arm_ext_ras =
    233   ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
    234 /* FP16 instructions.  */
    235 static const arm_feature_set arm_ext_fp16 =
    236   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
    237 
    238 static const arm_feature_set arm_arch_any = ARM_ANY;
    239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
    240 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
    241 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
    242 #ifdef OBJ_ELF
    243 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
    244 #endif
    245 
    246 static const arm_feature_set arm_cext_iwmmxt2 =
    247   ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
    248 static const arm_feature_set arm_cext_iwmmxt =
    249   ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
    250 static const arm_feature_set arm_cext_xscale =
    251   ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
    252 static const arm_feature_set arm_cext_maverick =
    253   ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
    254 static const arm_feature_set fpu_fpa_ext_v1 =
    255   ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
    256 static const arm_feature_set fpu_fpa_ext_v2 =
    257   ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
    258 static const arm_feature_set fpu_vfp_ext_v1xd =
    259   ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
    260 static const arm_feature_set fpu_vfp_ext_v1 =
    261   ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
    262 static const arm_feature_set fpu_vfp_ext_v2 =
    263   ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
    264 static const arm_feature_set fpu_vfp_ext_v3xd =
    265   ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
    266 static const arm_feature_set fpu_vfp_ext_v3 =
    267   ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
    268 static const arm_feature_set fpu_vfp_ext_d32 =
    269   ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
    270 static const arm_feature_set fpu_neon_ext_v1 =
    271   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
    272 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
    273   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
    274 #ifdef OBJ_ELF
    275 static const arm_feature_set fpu_vfp_fp16 =
    276   ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
    277 static const arm_feature_set fpu_neon_ext_fma =
    278   ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
    279 #endif
    280 static const arm_feature_set fpu_vfp_ext_fma =
    281   ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
    282 static const arm_feature_set fpu_vfp_ext_armv8 =
    283   ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
    284 static const arm_feature_set fpu_vfp_ext_armv8xd =
    285   ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
    286 static const arm_feature_set fpu_neon_ext_armv8 =
    287   ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
    288 static const arm_feature_set fpu_crypto_ext_armv8 =
    289   ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
    290 static const arm_feature_set crc_ext_armv8 =
    291   ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
    292 static const arm_feature_set fpu_neon_ext_v8_1 =
    293   ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
    294 
    295 static int mfloat_abi_opt = -1;
    296 /* Record user cpu selection for object attributes.  */
    297 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
    298 /* Must be long enough to hold any of the names in arm_cpus.  */
    299 static char selected_cpu_name[20];
    300 
    301 extern FLONUM_TYPE generic_floating_point_number;
    302 
     303 /* Return TRUE if no cpu was selected on the command line.  */
    304 static bfd_boolean
    305 no_cpu_selected (void)
    306 {
    307   return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
    308 }
    309 
    310 #ifdef OBJ_ELF
    311 # ifdef EABI_DEFAULT
    312 static int meabi_flags = EABI_DEFAULT;
    313 # else
    314 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
    315 # endif
    316 
    317 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
    318 
    319 bfd_boolean
    320 arm_is_eabi (void)
    321 {
    322   return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
    323 }
    324 #endif
    325 
    326 #ifdef OBJ_ELF
    327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
    328 symbolS * GOT_symbol;
    329 #endif
    330 
    331 /* 0: assemble for ARM,
    332    1: assemble for Thumb,
    333    2: assemble for Thumb even though target CPU does not support thumb
    334       instructions.  */
    335 static int thumb_mode = 0;
    336 /* A value distinct from the possible values for thumb_mode that we
    337    can use to record whether thumb_mode has been copied into the
    338    tc_frag_data field of a frag.  */
    339 #define MODE_RECORDED (1 << 4)
    340 
    341 /* Specifies the intrinsic IT insn behavior mode.  */
    342 enum implicit_it_mode
    343 {
    344   IMPLICIT_IT_MODE_NEVER  = 0x00,
    345   IMPLICIT_IT_MODE_ARM    = 0x01,
    346   IMPLICIT_IT_MODE_THUMB  = 0x02,
    347   IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
    348 };
    349 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
    350 
    351 /* If unified_syntax is true, we are processing the new unified
    352    ARM/Thumb syntax.  Important differences from the old ARM mode:
    353 
    354      - Immediate operands do not require a # prefix.
    355      - Conditional affixes always appear at the end of the
    356        instruction.  (For backward compatibility, those instructions
     357        that formerly had them in the middle continue to accept them
    358        there.)
    359      - The IT instruction may appear, and if it does is validated
    360        against subsequent conditional affixes.  It does not generate
    361        machine code.
    362 
    363    Important differences from the old Thumb mode:
    364 
    365      - Immediate operands do not require a # prefix.
    366      - Most of the V6T2 instructions are only available in unified mode.
    367      - The .N and .W suffixes are recognized and honored (it is an error
    368        if they cannot be honored).
    369      - All instructions set the flags if and only if they have an 's' affix.
    370      - Conditional affixes may be used.  They are validated against
    371        preceding IT instructions.  Unlike ARM mode, you cannot use a
    372        conditional affix except in the scope of an IT instruction.  */
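         /* Illustrative example (not from the original comment): the old divided
            syntax put the condition in the middle of a mnemonic, e.g.
            "ldreqb r0, [r1]", whereas unified syntax places it at the end,
            e.g. "ldrbeq r0, [r1]"; as noted above, the older placement is
            still accepted for backward compatibility.  */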
    373 
    374 static bfd_boolean unified_syntax = FALSE;
    375 
    376 /* An immediate operand can start with #, and ld*, st*, pld operands
    377    can contain [ and ].  We need to tell APP not to elide whitespace
    378    before a [, which can appear as the first operand for pld.
    379    Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
    380 const char arm_symbol_chars[] = "#[]{}";
    381 
    382 enum neon_el_type
    383 {
    384   NT_invtype,
    385   NT_untyped,
    386   NT_integer,
    387   NT_float,
    388   NT_poly,
    389   NT_signed,
    390   NT_unsigned
    391 };
    392 
    393 struct neon_type_el
    394 {
    395   enum neon_el_type type;
    396   unsigned size;
    397 };
    398 
    399 #define NEON_MAX_TYPE_ELS 4
    400 
    401 struct neon_type
    402 {
    403   struct neon_type_el el[NEON_MAX_TYPE_ELS];
    404   unsigned elems;
    405 };
    406 
    407 enum it_instruction_type
    408 {
    409    OUTSIDE_IT_INSN,
    410    INSIDE_IT_INSN,
    411    INSIDE_IT_LAST_INSN,
    412    IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
    413 			      if inside, should be the last one.  */
    414    NEUTRAL_IT_INSN,        /* This could be either inside or outside,
    415 			      i.e. BKPT and NOP.  */
    416    IT_INSN                 /* The IT insn has been parsed.  */
    417 };
    418 
    419 /* The maximum number of operands we need.  */
    420 #define ARM_IT_MAX_OPERANDS 6
    421 
    422 struct arm_it
    423 {
    424   const char *	error;
    425   unsigned long instruction;
    426   int		size;
    427   int		size_req;
    428   int		cond;
    429   /* "uncond_value" is set to the value in place of the conditional field in
    430      unconditional versions of the instruction, or -1 if nothing is
    431      appropriate.  */
    432   int		uncond_value;
    433   struct neon_type vectype;
    434   /* This does not indicate an actual NEON instruction, only that
    435      the mnemonic accepts neon-style type suffixes.  */
    436   int		is_neon;
    437   /* Set to the opcode if the instruction needs relaxation.
    438      Zero if the instruction is not relaxed.  */
    439   unsigned long	relax;
    440   struct
    441   {
    442     bfd_reloc_code_real_type type;
    443     expressionS		     exp;
    444     int			     pc_rel;
    445   } reloc;
    446 
    447   enum it_instruction_type it_insn_type;
    448 
    449   struct
    450   {
    451     unsigned reg;
    452     signed int imm;
    453     struct neon_type_el vectype;
    454     unsigned present	: 1;  /* Operand present.  */
    455     unsigned isreg	: 1;  /* Operand was a register.  */
    456     unsigned immisreg	: 1;  /* .imm field is a second register.  */
    457     unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    458     unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    459     unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    460     /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
    461        instructions. This allows us to disambiguate ARM <-> vector insns.  */
    462     unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    463     unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    464     unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    465     unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    466     unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    467     unsigned writeback	: 1;  /* Operand has trailing !  */
    468     unsigned preind	: 1;  /* Preindexed address.  */
    469     unsigned postind	: 1;  /* Postindexed address.  */
    470     unsigned negative	: 1;  /* Index register was negated.  */
    471     unsigned shifted	: 1;  /* Shift applied to operation.  */
    472     unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
    473   } operands[ARM_IT_MAX_OPERANDS];
    474 };
    475 
    476 static struct arm_it inst;
    477 
    478 #define NUM_FLOAT_VALS 8
    479 
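         /* These are the floating-point constants that FPA instructions can
            encode directly as an immediate operand.  */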
    480 const char * fp_const[] =
    481 {
    482   "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
    483 };
    484 
    485 /* Number of littlenums required to hold an extended precision number.	*/
    486 #define MAX_LITTLENUMS 6
    487 
    488 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
    489 
    490 #define FAIL	(-1)
    491 #define SUCCESS (0)
    492 
    493 #define SUFF_S 1
    494 #define SUFF_D 2
    495 #define SUFF_E 3
    496 #define SUFF_P 4
    497 
    498 #define CP_T_X	 0x00008000
    499 #define CP_T_Y	 0x00400000
    500 
    501 #define CONDS_BIT	 0x00100000
    502 #define LOAD_BIT	 0x00100000
    503 
    504 #define DOUBLE_LOAD_FLAG 0x00000001
    505 
    506 struct asm_cond
    507 {
    508   const char *	 template_name;
    509   unsigned long  value;
    510 };
    511 
    512 #define COND_ALWAYS 0xE
    513 
    514 struct asm_psr
    515 {
    516   const char *   template_name;
    517   unsigned long  field;
    518 };
    519 
    520 struct asm_barrier_opt
    521 {
    522   const char *    template_name;
    523   unsigned long   value;
    524   const arm_feature_set arch;
    525 };
    526 
    527 /* The bit that distinguishes CPSR and SPSR.  */
    528 #define SPSR_BIT   (1 << 22)
    529 
    530 /* The individual PSR flag bits.  */
    531 #define PSR_c	(1 << 16)
    532 #define PSR_x	(1 << 17)
    533 #define PSR_s	(1 << 18)
    534 #define PSR_f	(1 << 19)
    535 
    536 struct reloc_entry
    537 {
    538   const char *                    name;
    539   bfd_reloc_code_real_type  reloc;
    540 };
    541 
    542 enum vfp_reg_pos
    543 {
    544   VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
    545   VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
    546 };
    547 
    548 enum vfp_ldstm_type
    549 {
    550   VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
    551 };
    552 
    553 /* Bits for DEFINED field in neon_typed_alias.  */
    554 #define NTA_HASTYPE  1
    555 #define NTA_HASINDEX 2
    556 
    557 struct neon_typed_alias
    558 {
    559   unsigned char        defined;
    560   unsigned char        index;
    561   struct neon_type_el  eltype;
    562 };
    563 
    564 /* ARM register categories.  This includes coprocessor numbers and various
    565    architecture extensions' registers.	*/
    566 enum arm_reg_type
    567 {
    568   REG_TYPE_RN,
    569   REG_TYPE_CP,
    570   REG_TYPE_CN,
    571   REG_TYPE_FN,
    572   REG_TYPE_VFS,
    573   REG_TYPE_VFD,
    574   REG_TYPE_NQ,
    575   REG_TYPE_VFSD,
    576   REG_TYPE_NDQ,
    577   REG_TYPE_NSDQ,
    578   REG_TYPE_VFC,
    579   REG_TYPE_MVF,
    580   REG_TYPE_MVD,
    581   REG_TYPE_MVFX,
    582   REG_TYPE_MVDX,
    583   REG_TYPE_MVAX,
    584   REG_TYPE_DSPSC,
    585   REG_TYPE_MMXWR,
    586   REG_TYPE_MMXWC,
    587   REG_TYPE_MMXWCG,
    588   REG_TYPE_XSCALE,
    589   REG_TYPE_RNB
    590 };
    591 
    592 /* Structure for a hash table entry for a register.
    593    If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
    594    information which states whether a vector type or index is specified (for a
    595    register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
    596 struct reg_entry
    597 {
    598   const char *               name;
    599   unsigned int               number;
    600   unsigned char              type;
    601   unsigned char              builtin;
    602   struct neon_typed_alias *  neon;
    603 };
    604 
    605 /* Diagnostics used when we don't get a register of the expected type.	*/
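         /* The entries below appear in the same order as enum arm_reg_type
            above (REG_TYPE_RNB has no message), so a register type can be
            used to index this array.  */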
    606 const char * const reg_expected_msgs[] =
    607 {
    608   N_("ARM register expected"),
    609   N_("bad or missing co-processor number"),
    610   N_("co-processor register expected"),
    611   N_("FPA register expected"),
    612   N_("VFP single precision register expected"),
    613   N_("VFP/Neon double precision register expected"),
    614   N_("Neon quad precision register expected"),
    615   N_("VFP single or double precision register expected"),
    616   N_("Neon double or quad precision register expected"),
    617   N_("VFP single, double or Neon quad precision register expected"),
    618   N_("VFP system register expected"),
    619   N_("Maverick MVF register expected"),
    620   N_("Maverick MVD register expected"),
    621   N_("Maverick MVFX register expected"),
    622   N_("Maverick MVDX register expected"),
    623   N_("Maverick MVAX register expected"),
    624   N_("Maverick DSPSC register expected"),
    625   N_("iWMMXt data register expected"),
    626   N_("iWMMXt control register expected"),
    627   N_("iWMMXt scalar register expected"),
    628   N_("XScale accumulator register expected"),
    629 };
    630 
    631 /* Some well known registers that we refer to directly elsewhere.  */
    632 #define REG_R12	12
    633 #define REG_SP	13
    634 #define REG_LR	14
    635 #define REG_PC	15
    636 
     637 /* ARM instructions take 4 bytes in the object file, Thumb instructions
    638    take 2:  */
    639 #define INSN_SIZE	4
    640 
    641 struct asm_opcode
    642 {
    643   /* Basic string to match.  */
    644   const char * template_name;
    645 
    646   /* Parameters to instruction.	 */
    647   unsigned int operands[8];
    648 
    649   /* Conditional tag - see opcode_lookup.  */
    650   unsigned int tag : 4;
    651 
    652   /* Basic instruction code.  */
    653   unsigned int avalue : 28;
    654 
    655   /* Thumb-format instruction code.  */
    656   unsigned int tvalue;
    657 
    658   /* Which architecture variant provides this instruction.  */
    659   const arm_feature_set * avariant;
    660   const arm_feature_set * tvariant;
    661 
    662   /* Function to call to encode instruction in ARM format.  */
    663   void (* aencode) (void);
    664 
    665   /* Function to call to encode instruction in Thumb format.  */
    666   void (* tencode) (void);
    667 };
    668 
    669 /* Defines for various bits that we will want to toggle.  */
    670 #define INST_IMMEDIATE	0x02000000
    671 #define OFFSET_REG	0x02000000
    672 #define HWOFFSET_IMM	0x00400000
    673 #define SHIFT_BY_REG	0x00000010
    674 #define PRE_INDEX	0x01000000
    675 #define INDEX_UP	0x00800000
    676 #define WRITE_BACK	0x00200000
    677 #define LDM_TYPE_2_OR_3	0x00400000
    678 #define CPSI_MMOD	0x00020000
    679 
    680 #define LITERAL_MASK	0xf000f000
    681 #define OPCODE_MASK	0xfe1fffff
    682 #define V4_STR_BIT	0x00000020
    683 #define VLDR_VMOV_SAME	0x0040f000
    684 
    685 #define T2_SUBS_PC_LR	0xf3de8f00
    686 
    687 #define DATA_OP_SHIFT	21
    688 
    689 #define T2_OPCODE_MASK	0xfe1fffff
    690 #define T2_DATA_OP_SHIFT 21
    691 
    692 #define A_COND_MASK         0xf0000000
    693 #define A_PUSH_POP_OP_MASK  0x0fff0000
    694 
     695 /* Opcodes for pushing/popping registers to/from the stack.  */
    696 #define A1_OPCODE_PUSH    0x092d0000
    697 #define A2_OPCODE_PUSH    0x052d0004
    698 #define A2_OPCODE_POP     0x049d0004
    699 
    700 /* Codes to distinguish the arithmetic instructions.  */
    701 #define OPCODE_AND	0
    702 #define OPCODE_EOR	1
    703 #define OPCODE_SUB	2
    704 #define OPCODE_RSB	3
    705 #define OPCODE_ADD	4
    706 #define OPCODE_ADC	5
    707 #define OPCODE_SBC	6
    708 #define OPCODE_RSC	7
    709 #define OPCODE_TST	8
    710 #define OPCODE_TEQ	9
    711 #define OPCODE_CMP	10
    712 #define OPCODE_CMN	11
    713 #define OPCODE_ORR	12
    714 #define OPCODE_MOV	13
    715 #define OPCODE_BIC	14
    716 #define OPCODE_MVN	15
    717 
    718 #define T2_OPCODE_AND	0
    719 #define T2_OPCODE_BIC	1
    720 #define T2_OPCODE_ORR	2
    721 #define T2_OPCODE_ORN	3
    722 #define T2_OPCODE_EOR	4
    723 #define T2_OPCODE_ADD	8
    724 #define T2_OPCODE_ADC	10
    725 #define T2_OPCODE_SBC	11
    726 #define T2_OPCODE_SUB	13
    727 #define T2_OPCODE_RSB	14
    728 
    729 #define T_OPCODE_MUL 0x4340
    730 #define T_OPCODE_TST 0x4200
    731 #define T_OPCODE_CMN 0x42c0
    732 #define T_OPCODE_NEG 0x4240
    733 #define T_OPCODE_MVN 0x43c0
    734 
    735 #define T_OPCODE_ADD_R3	0x1800
    736 #define T_OPCODE_SUB_R3 0x1a00
    737 #define T_OPCODE_ADD_HI 0x4400
    738 #define T_OPCODE_ADD_ST 0xb000
    739 #define T_OPCODE_SUB_ST 0xb080
    740 #define T_OPCODE_ADD_SP 0xa800
    741 #define T_OPCODE_ADD_PC 0xa000
    742 #define T_OPCODE_ADD_I8 0x3000
    743 #define T_OPCODE_SUB_I8 0x3800
    744 #define T_OPCODE_ADD_I3 0x1c00
    745 #define T_OPCODE_SUB_I3 0x1e00
    746 
    747 #define T_OPCODE_ASR_R	0x4100
    748 #define T_OPCODE_LSL_R	0x4080
    749 #define T_OPCODE_LSR_R	0x40c0
    750 #define T_OPCODE_ROR_R	0x41c0
    751 #define T_OPCODE_ASR_I	0x1000
    752 #define T_OPCODE_LSL_I	0x0000
    753 #define T_OPCODE_LSR_I	0x0800
    754 
    755 #define T_OPCODE_MOV_I8	0x2000
    756 #define T_OPCODE_CMP_I8 0x2800
    757 #define T_OPCODE_CMP_LR 0x4280
    758 #define T_OPCODE_MOV_HR 0x4600
    759 #define T_OPCODE_CMP_HR 0x4500
    760 
    761 #define T_OPCODE_LDR_PC 0x4800
    762 #define T_OPCODE_LDR_SP 0x9800
    763 #define T_OPCODE_STR_SP 0x9000
    764 #define T_OPCODE_LDR_IW 0x6800
    765 #define T_OPCODE_STR_IW 0x6000
    766 #define T_OPCODE_LDR_IH 0x8800
    767 #define T_OPCODE_STR_IH 0x8000
    768 #define T_OPCODE_LDR_IB 0x7800
    769 #define T_OPCODE_STR_IB 0x7000
    770 #define T_OPCODE_LDR_RW 0x5800
    771 #define T_OPCODE_STR_RW 0x5000
    772 #define T_OPCODE_LDR_RH 0x5a00
    773 #define T_OPCODE_STR_RH 0x5200
    774 #define T_OPCODE_LDR_RB 0x5c00
    775 #define T_OPCODE_STR_RB 0x5400
    776 
    777 #define T_OPCODE_PUSH	0xb400
    778 #define T_OPCODE_POP	0xbc00
    779 
    780 #define T_OPCODE_BRANCH 0xe000
    781 
    782 #define THUMB_SIZE	2	/* Size of thumb instruction.  */
    783 #define THUMB_PP_PC_LR 0x0100
    784 #define THUMB_LOAD_BIT 0x0800
    785 #define THUMB2_LOAD_BIT 0x00100000
    786 
    787 #define BAD_ARGS	_("bad arguments to instruction")
    788 #define BAD_SP          _("r13 not allowed here")
    789 #define BAD_PC		_("r15 not allowed here")
    790 #define BAD_COND	_("instruction cannot be conditional")
    791 #define BAD_OVERLAP	_("registers may not be the same")
    792 #define BAD_HIREG	_("lo register required")
    793 #define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
     794 #define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
    795 #define BAD_BRANCH	_("branch must be last instruction in IT block")
    796 #define BAD_NOT_IT	_("instruction not allowed in IT block")
    797 #define BAD_FPU		_("selected FPU does not support instruction")
    798 #define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
    799 #define BAD_IT_COND	_("incorrect condition in IT block")
    800 #define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
    801 #define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
    802 #define BAD_PC_ADDRESSING \
    803 	_("cannot use register index with PC-relative addressing")
    804 #define BAD_PC_WRITEBACK \
    805 	_("cannot use writeback with PC-relative addressing")
    806 #define BAD_RANGE	_("branch out of range")
    807 #define BAD_FP16	_("selected processor does not support fp16 instruction")
    808 #define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
    809 #define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
    810 
    811 static struct hash_control * arm_ops_hsh;
    812 static struct hash_control * arm_cond_hsh;
    813 static struct hash_control * arm_shift_hsh;
    814 static struct hash_control * arm_psr_hsh;
    815 static struct hash_control * arm_v7m_psr_hsh;
    816 static struct hash_control * arm_reg_hsh;
    817 static struct hash_control * arm_reloc_hsh;
    818 static struct hash_control * arm_barrier_opt_hsh;
    819 
    820 /* Stuff needed to resolve the label ambiguity
    821    As:
    822      ...
    823      label:   <insn>
    824    may differ from:
    825      ...
    826      label:
    827 	      <insn>  */
    828 
    829 symbolS *  last_label_seen;
    830 static int label_is_thumb_function_name = FALSE;
    831 
    832 /* Literal pool structure.  Held on a per-section
    833    and per-sub-section basis.  */
    834 
    835 #define MAX_LITERAL_POOL_SIZE 1024
    836 typedef struct literal_pool
    837 {
    838   expressionS	         literals [MAX_LITERAL_POOL_SIZE];
    839   unsigned int	         next_free_entry;
    840   unsigned int	         id;
    841   symbolS *	         symbol;
    842   segT		         section;
    843   subsegT	         sub_section;
    844 #ifdef OBJ_ELF
    845   struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
    846 #endif
    847   struct literal_pool *  next;
    848   unsigned int		 alignment;
    849 } literal_pool;
    850 
    851 /* Pointer to a linked list of literal pools.  */
    852 literal_pool * list_of_pools = NULL;
    853 
    854 typedef enum asmfunc_states
    855 {
    856   OUTSIDE_ASMFUNC,
    857   WAITING_ASMFUNC_NAME,
    858   WAITING_ENDASMFUNC
    859 } asmfunc_states;
    860 
    861 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
    862 
    863 #ifdef OBJ_ELF
    864 #  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
    865 #else
    866 static struct current_it now_it;
    867 #endif
    868 
    869 static inline int
    870 now_it_compatible (int cond)
    871 {
    872   return (cond & ~1) == (now_it.cc & ~1);
    873 }
    874 
    875 static inline int
    876 conditional_insn (void)
    877 {
    878   return inst.cond != COND_ALWAYS;
    879 }
    880 
    881 static int in_it_block (void);
    882 
    883 static int handle_it_state (void);
    884 
    885 static void force_automatic_it_block_close (void);
    886 
    887 static void it_fsm_post_encode (void);
    888 
    889 #define set_it_insn_type(type)			\
    890   do						\
    891     {						\
    892       inst.it_insn_type = type;			\
    893       if (handle_it_state () == FAIL)		\
    894 	return;					\
    895     }						\
    896   while (0)
    897 
    898 #define set_it_insn_type_nonvoid(type, failret) \
    899   do						\
    900     {                                           \
    901       inst.it_insn_type = type;			\
    902       if (handle_it_state () == FAIL)		\
    903 	return failret;				\
    904     }						\
    905   while(0)
    906 
    907 #define set_it_insn_type_last()				\
    908   do							\
    909     {							\
    910       if (inst.cond == COND_ALWAYS)			\
    911 	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
    912       else						\
    913 	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    914     }							\
    915   while (0)
    916 
    917 /* Pure syntax.	 */
    918 
    919 /* This array holds the chars that always start a comment.  If the
    920    pre-processor is disabled, these aren't very useful.	 */
    921 char arm_comment_chars[] = "@";
    922 
    923 /* This array holds the chars that only start a comment at the beginning of
    924    a line.  If the line seems to have the form '# 123 filename'
    925    .line and .file directives will appear in the pre-processed output.	*/
    926 /* Note that input_file.c hand checks for '#' at the beginning of the
    927    first line of the input file.  This is because the compiler outputs
    928    #NO_APP at the beginning of its output.  */
    929 /* Also note that comments like this one will always work.  */
    930 const char line_comment_chars[] = "#";
    931 
    932 char arm_line_separator_chars[] = ";";
    933 
     934 /* Chars that can be used to separate the mantissa
     935    from the exponent in floating point numbers.  */
    936 const char EXP_CHARS[] = "eE";
    937 
    938 /* Chars that mean this number is a floating point constant.  */
    939 /* As in 0f12.456  */
    940 /* or	 0d1.2345e12  */
    941 
    942 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
    943 
    944 /* Prefix characters that indicate the start of an immediate
    945    value.  */
    946 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
    947 
    948 /* Separator character handling.  */
    949 
    950 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
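         /* Note that skip_whitespace skips at most a single space character.  */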
    951 
    952 static inline int
    953 skip_past_char (char ** str, char c)
    954 {
    955   /* PR gas/14987: Allow for whitespace before the expected character.  */
    956   skip_whitespace (*str);
    957 
    958   if (**str == c)
    959     {
    960       (*str)++;
    961       return SUCCESS;
    962     }
    963   else
    964     return FAIL;
    965 }
    966 
    967 #define skip_past_comma(str) skip_past_char (str, ',')
    968 
    969 /* Arithmetic expressions (possibly involving symbols).	 */
    970 
    971 /* Return TRUE if anything in the expression is a bignum.  */
    972 
    973 static int
    974 walk_no_bignums (symbolS * sp)
    975 {
    976   if (symbol_get_value_expression (sp)->X_op == O_big)
    977     return 1;
    978 
    979   if (symbol_get_value_expression (sp)->X_add_symbol)
    980     {
    981       return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
    982 	      || (symbol_get_value_expression (sp)->X_op_symbol
    983 		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    984     }
    985 
    986   return 0;
    987 }
    988 
    989 static int in_my_get_expression = 0;
    990 
    991 /* Third argument to my_get_expression.	 */
    992 #define GE_NO_PREFIX 0
    993 #define GE_IMM_PREFIX 1
    994 #define GE_OPT_PREFIX 2
    995 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
    996    immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
    997 #define GE_OPT_PREFIX_BIG 3
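         /* With GE_IMM_PREFIX a leading '#' or '$' is required (see
            is_immediate_prefix); GE_NO_PREFIX neither requires nor consumes one;
            the GE_OPT_PREFIX variants skip a prefix if one is present.  See
            my_get_expression below.  */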
    998 
    999 static int
   1000 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
   1001 {
   1002   char * save_in;
   1003   segT	 seg;
   1004 
   1005   /* In unified syntax, all prefixes are optional.  */
   1006   if (unified_syntax)
   1007     prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
   1008 		  : GE_OPT_PREFIX;
   1009 
   1010   switch (prefix_mode)
   1011     {
   1012     case GE_NO_PREFIX: break;
   1013     case GE_IMM_PREFIX:
   1014       if (!is_immediate_prefix (**str))
   1015 	{
   1016 	  inst.error = _("immediate expression requires a # prefix");
   1017 	  return FAIL;
   1018 	}
   1019       (*str)++;
   1020       break;
   1021     case GE_OPT_PREFIX:
   1022     case GE_OPT_PREFIX_BIG:
   1023       if (is_immediate_prefix (**str))
   1024 	(*str)++;
   1025       break;
   1026     default: abort ();
   1027     }
   1028 
   1029   memset (ep, 0, sizeof (expressionS));
   1030 
   1031   save_in = input_line_pointer;
   1032   input_line_pointer = *str;
   1033   in_my_get_expression = 1;
   1034   seg = expression (ep);
   1035   in_my_get_expression = 0;
   1036 
   1037   if (ep->X_op == O_illegal || ep->X_op == O_absent)
   1038     {
   1039       /* We found a bad or missing expression in md_operand().  */
   1040       *str = input_line_pointer;
   1041       input_line_pointer = save_in;
   1042       if (inst.error == NULL)
   1043 	inst.error = (ep->X_op == O_absent
   1044 		      ? _("missing expression") :_("bad expression"));
   1045       return 1;
   1046     }
   1047 
   1048 #ifdef OBJ_AOUT
   1049   if (seg != absolute_section
   1050       && seg != text_section
   1051       && seg != data_section
   1052       && seg != bss_section
   1053       && seg != undefined_section)
   1054     {
   1055       inst.error = _("bad segment");
   1056       *str = input_line_pointer;
   1057       input_line_pointer = save_in;
   1058       return 1;
   1059     }
   1060 #else
   1061   (void) seg;
   1062 #endif
   1063 
   1064   /* Get rid of any bignums now, so that we don't generate an error for which
   1065      we can't establish a line number later on.	 Big numbers are never valid
   1066      in instructions, which is where this routine is always called.  */
   1067   if (prefix_mode != GE_OPT_PREFIX_BIG
   1068       && (ep->X_op == O_big
   1069 	  || (ep->X_add_symbol
   1070 	      && (walk_no_bignums (ep->X_add_symbol)
   1071 		  || (ep->X_op_symbol
   1072 		      && walk_no_bignums (ep->X_op_symbol))))))
   1073     {
   1074       inst.error = _("invalid constant");
   1075       *str = input_line_pointer;
   1076       input_line_pointer = save_in;
   1077       return 1;
   1078     }
   1079 
   1080   *str = input_line_pointer;
   1081   input_line_pointer = save_in;
   1082   return 0;
   1083 }
   1084 
   1085 /* Turn a string in input_line_pointer into a floating point constant
   1086    of type TYPE, and store the appropriate bytes in *LITP.  The number
   1087    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   1088    returned, or NULL on OK.
   1089 
    1090    Note that fp constants aren't represented in the normal way on the ARM.
   1091    In big endian mode, things are as expected.	However, in little endian
   1092    mode fp constants are big-endian word-wise, and little-endian byte-wise
   1093    within the words.  For example, (double) 1.1 in big endian mode is
   1094    the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   1095    the byte sequence 99 99 f1 3f 9a 99 99 99.
   1096 
   1097    ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
   1098 
   1099 const char *
   1100 md_atof (int type, char * litP, int * sizeP)
   1101 {
   1102   int prec;
   1103   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   1104   char *t;
   1105   int i;
   1106 
   1107   switch (type)
   1108     {
   1109     case 'f':
   1110     case 'F':
   1111     case 's':
   1112     case 'S':
   1113       prec = 2;
   1114       break;
   1115 
   1116     case 'd':
   1117     case 'D':
   1118     case 'r':
   1119     case 'R':
   1120       prec = 4;
   1121       break;
   1122 
   1123     case 'x':
   1124     case 'X':
   1125       prec = 5;
   1126       break;
   1127 
   1128     case 'p':
   1129     case 'P':
   1130       prec = 5;
   1131       break;
   1132 
   1133     default:
   1134       *sizeP = 0;
   1135       return _("Unrecognized or unsupported floating point constant");
   1136     }
   1137 
   1138   t = atof_ieee (input_line_pointer, type, words);
   1139   if (t)
   1140     input_line_pointer = t;
   1141   *sizeP = prec * sizeof (LITTLENUM_TYPE);
   1142 
   1143   if (target_big_endian)
   1144     {
   1145       for (i = 0; i < prec; i++)
   1146 	{
   1147 	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
   1148 	  litP += sizeof (LITTLENUM_TYPE);
   1149 	}
   1150     }
   1151   else
   1152     {
   1153       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
   1154 	for (i = prec - 1; i >= 0; i--)
   1155 	  {
   1156 	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
   1157 	    litP += sizeof (LITTLENUM_TYPE);
   1158 	  }
   1159       else
   1160 	/* For a 4 byte float the order of elements in `words' is 1 0.
   1161 	   For an 8 byte float the order is 1 0 3 2.  */
   1162 	for (i = 0; i < prec; i += 2)
   1163 	  {
   1164 	    md_number_to_chars (litP, (valueT) words[i + 1],
   1165 				sizeof (LITTLENUM_TYPE));
   1166 	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
   1167 				(valueT) words[i], sizeof (LITTLENUM_TYPE));
   1168 	    litP += 2 * sizeof (LITTLENUM_TYPE);
   1169 	  }
   1170     }
   1171 
   1172   return NULL;
   1173 }
   1174 
   1175 /* We handle all bad expressions here, so that we can report the faulty
   1176    instruction in the error message.  */
   1177 void
   1178 md_operand (expressionS * exp)
   1179 {
   1180   if (in_my_get_expression)
   1181     exp->X_op = O_illegal;
   1182 }
   1183 
   1184 /* Immediate values.  */
   1185 
   1186 /* Generic immediate-value read function for use in directives.
   1187    Accepts anything that 'expression' can fold to a constant.
   1188    *val receives the number.  */
   1189 #ifdef OBJ_ELF
   1190 static int
   1191 immediate_for_directive (int *val)
   1192 {
   1193   expressionS exp;
   1194   exp.X_op = O_illegal;
   1195 
   1196   if (is_immediate_prefix (*input_line_pointer))
   1197     {
   1198       input_line_pointer++;
   1199       expression (&exp);
   1200     }
   1201 
   1202   if (exp.X_op != O_constant)
   1203     {
   1204       as_bad (_("expected #constant"));
   1205       ignore_rest_of_line ();
   1206       return FAIL;
   1207     }
   1208   *val = exp.X_add_number;
   1209   return SUCCESS;
   1210 }
   1211 #endif
   1212 
   1213 /* Register parsing.  */
   1214 
   1215 /* Generic register parser.  CCP points to what should be the
   1216    beginning of a register name.  If it is indeed a valid register
   1217    name, advance CCP over it and return the reg_entry structure;
   1218    otherwise return NULL.  Does not issue diagnostics.	*/
   1219 
   1220 static struct reg_entry *
   1221 arm_reg_parse_multi (char **ccp)
   1222 {
   1223   char *start = *ccp;
   1224   char *p;
   1225   struct reg_entry *reg;
   1226 
   1227   skip_whitespace (start);
   1228 
   1229 #ifdef REGISTER_PREFIX
   1230   if (*start != REGISTER_PREFIX)
   1231     return NULL;
   1232   start++;
   1233 #endif
   1234 #ifdef OPTIONAL_REGISTER_PREFIX
   1235   if (*start == OPTIONAL_REGISTER_PREFIX)
   1236     start++;
   1237 #endif
   1238 
   1239   p = start;
   1240   if (!ISALPHA (*p) || !is_name_beginner (*p))
   1241     return NULL;
   1242 
   1243   do
   1244     p++;
   1245   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
   1246 
   1247   reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
   1248 
   1249   if (!reg)
   1250     return NULL;
   1251 
   1252   *ccp = p;
   1253   return reg;
   1254 }
   1255 
   1256 static int
   1257 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
   1258 		    enum arm_reg_type type)
   1259 {
   1260   /* Alternative syntaxes are accepted for a few register classes.  */
   1261   switch (type)
   1262     {
   1263     case REG_TYPE_MVF:
   1264     case REG_TYPE_MVD:
   1265     case REG_TYPE_MVFX:
   1266     case REG_TYPE_MVDX:
   1267       /* Generic coprocessor register names are allowed for these.  */
   1268       if (reg && reg->type == REG_TYPE_CN)
   1269 	return reg->number;
   1270       break;
   1271 
   1272     case REG_TYPE_CP:
   1273       /* For backward compatibility, a bare number is valid here.  */
   1274       {
   1275 	unsigned long processor = strtoul (start, ccp, 10);
   1276 	if (*ccp != start && processor <= 15)
   1277 	  return processor;
   1278       }
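               /* Fall through.  */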
   1279 
   1280     case REG_TYPE_MMXWC:
   1281       /* WC includes WCG.  ??? I'm not sure this is true for all
   1282 	 instructions that take WC registers.  */
   1283       if (reg && reg->type == REG_TYPE_MMXWCG)
   1284 	return reg->number;
   1285       break;
   1286 
   1287     default:
   1288       break;
   1289     }
   1290 
   1291   return FAIL;
   1292 }
   1293 
   1294 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   1295    return value is the register number or FAIL.  */
   1296 
   1297 static int
   1298 arm_reg_parse (char **ccp, enum arm_reg_type type)
   1299 {
   1300   char *start = *ccp;
   1301   struct reg_entry *reg = arm_reg_parse_multi (ccp);
   1302   int ret;
   1303 
   1304   /* Do not allow a scalar (reg+index) to parse as a register.  */
   1305   if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
   1306     return FAIL;
   1307 
   1308   if (reg && reg->type == type)
   1309     return reg->number;
   1310 
   1311   if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
   1312     return ret;
   1313 
   1314   *ccp = start;
   1315   return FAIL;
   1316 }
   1317 
   1318 /* Parse a Neon type specifier. *STR should point at the leading '.'
   1319    character. Does no verification at this stage that the type fits the opcode
   1320    properly. E.g.,
   1321 
   1322      .i32.i32.s16
   1323      .s32.f32
   1324      .u16
   1325 
   1326    Can all be legally parsed by this function.
   1327 
   1328    Fills in neon_type struct pointer with parsed information, and updates STR
   1329    to point after the parsed type specifier. Returns SUCCESS if this was a legal
   1330    type, FAIL if not.  */
   1331 
   1332 static int
   1333 parse_neon_type (struct neon_type *type, char **str)
   1334 {
   1335   char *ptr = *str;
   1336 
   1337   if (type)
   1338     type->elems = 0;
   1339 
   1340   while (type->elems < NEON_MAX_TYPE_ELS)
   1341     {
   1342       enum neon_el_type thistype = NT_untyped;
   1343       unsigned thissize = -1u;
   1344 
   1345       if (*ptr != '.')
   1346 	break;
   1347 
   1348       ptr++;
   1349 
   1350       /* Just a size without an explicit type.  */
   1351       if (ISDIGIT (*ptr))
   1352 	goto parsesize;
   1353 
   1354       switch (TOLOWER (*ptr))
   1355 	{
   1356 	case 'i': thistype = NT_integer; break;
   1357 	case 'f': thistype = NT_float; break;
   1358 	case 'p': thistype = NT_poly; break;
   1359 	case 's': thistype = NT_signed; break;
   1360 	case 'u': thistype = NT_unsigned; break;
   1361 	case 'd':
   1362 	  thistype = NT_float;
   1363 	  thissize = 64;
   1364 	  ptr++;
   1365 	  goto done;
   1366 	default:
   1367 	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
   1368 	  return FAIL;
   1369 	}
   1370 
   1371       ptr++;
   1372 
   1373       /* .f is an abbreviation for .f32.  */
   1374       if (thistype == NT_float && !ISDIGIT (*ptr))
   1375 	thissize = 32;
   1376       else
   1377 	{
   1378 	parsesize:
   1379 	  thissize = strtoul (ptr, &ptr, 10);
   1380 
   1381 	  if (thissize != 8 && thissize != 16 && thissize != 32
   1382 	      && thissize != 64)
   1383 	    {
   1384 	      as_bad (_("bad size %d in type specifier"), thissize);
   1385 	      return FAIL;
   1386 	    }
   1387 	}
   1388 
   1389       done:
   1390       if (type)
   1391 	{
   1392 	  type->el[type->elems].type = thistype;
   1393 	  type->el[type->elems].size = thissize;
   1394 	  type->elems++;
   1395 	}
   1396     }
   1397 
   1398   /* Empty/missing type is not a successful parse.  */
   1399   if (type->elems == 0)
   1400     return FAIL;
   1401 
   1402   *str = ptr;
   1403 
   1404   return SUCCESS;
   1405 }
   1406 
   1407 /* Errors may be set multiple times during parsing or bit encoding
   1408    (particularly in the Neon bits), but usually the earliest error which is set
   1409    will be the most meaningful. Avoid overwriting it with later (cascading)
   1410    errors by calling this function.  */
   1411 
   1412 static void
   1413 first_error (const char *err)
   1414 {
   1415   if (!inst.error)
   1416     inst.error = err;
   1417 }
   1418 
   1419 /* Parse a single type, e.g. ".s32", leading period included.  */
   1420 static int
   1421 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
   1422 {
   1423   char *str = *ccp;
   1424   struct neon_type optype;
   1425 
   1426   if (*str == '.')
   1427     {
   1428       if (parse_neon_type (&optype, &str) == SUCCESS)
   1429 	{
   1430 	  if (optype.elems == 1)
   1431 	    *vectype = optype.el[0];
   1432 	  else
   1433 	    {
   1434 	      first_error (_("only one type should be specified for operand"));
   1435 	      return FAIL;
   1436 	    }
   1437 	}
   1438       else
   1439 	{
   1440 	  first_error (_("vector type expected"));
   1441 	  return FAIL;
   1442 	}
   1443     }
   1444   else
   1445     return FAIL;
   1446 
   1447   *ccp = str;
   1448 
   1449   return SUCCESS;
   1450 }
   1451 
    1452 /* Special meanings for indices (which normally have a range of 0-7); these
    1453    values still fit into a 4-bit integer.  */
   1454 
   1455 #define NEON_ALL_LANES		15
   1456 #define NEON_INTERLEAVE_LANES	14
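         /* A scalar written with empty brackets, e.g. "d0[]", is recorded with an
            index of NEON_ALL_LANES; see parse_typed_reg_or_scalar below.  */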
   1457 
   1458 /* Parse either a register or a scalar, with an optional type. Return the
   1459    register number, and optionally fill in the actual type of the register
    1460    when multiple alternatives were given (e.g. REG_TYPE_NDQ) in *RTYPE, and
   1461    type/index information in *TYPEINFO.  */
   1462 
   1463 static int
   1464 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
   1465 			   enum arm_reg_type *rtype,
   1466 			   struct neon_typed_alias *typeinfo)
   1467 {
   1468   char *str = *ccp;
   1469   struct reg_entry *reg = arm_reg_parse_multi (&str);
   1470   struct neon_typed_alias atype;
   1471   struct neon_type_el parsetype;
   1472 
   1473   atype.defined = 0;
   1474   atype.index = -1;
   1475   atype.eltype.type = NT_invtype;
   1476   atype.eltype.size = -1;
   1477 
   1478   /* Try alternate syntax for some types of register. Note these are mutually
   1479      exclusive with the Neon syntax extensions.  */
   1480   if (reg == NULL)
   1481     {
   1482       int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
   1483       if (altreg != FAIL)
   1484 	*ccp = str;
   1485       if (typeinfo)
   1486 	*typeinfo = atype;
   1487       return altreg;
   1488     }
   1489 
   1490   /* Undo polymorphism when a set of register types may be accepted.  */
   1491   if ((type == REG_TYPE_NDQ
   1492        && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
   1493       || (type == REG_TYPE_VFSD
   1494 	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
   1495       || (type == REG_TYPE_NSDQ
   1496 	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
   1497 	      || reg->type == REG_TYPE_NQ))
   1498       || (type == REG_TYPE_MMXWC
   1499 	  && (reg->type == REG_TYPE_MMXWCG)))
   1500     type = (enum arm_reg_type) reg->type;
   1501 
   1502   if (type != reg->type)
   1503     return FAIL;
   1504 
   1505   if (reg->neon)
   1506     atype = *reg->neon;
   1507 
   1508   if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
   1509     {
   1510       if ((atype.defined & NTA_HASTYPE) != 0)
   1511 	{
   1512 	  first_error (_("can't redefine type for operand"));
   1513 	  return FAIL;
   1514 	}
   1515       atype.defined |= NTA_HASTYPE;
   1516       atype.eltype = parsetype;
   1517     }
   1518 
   1519   if (skip_past_char (&str, '[') == SUCCESS)
   1520     {
   1521       if (type != REG_TYPE_VFD)
   1522 	{
   1523 	  first_error (_("only D registers may be indexed"));
   1524 	  return FAIL;
   1525 	}
   1526 
   1527       if ((atype.defined & NTA_HASINDEX) != 0)
   1528 	{
   1529 	  first_error (_("can't change index for operand"));
   1530 	  return FAIL;
   1531 	}
   1532 
   1533       atype.defined |= NTA_HASINDEX;
   1534 
   1535       if (skip_past_char (&str, ']') == SUCCESS)
   1536 	atype.index = NEON_ALL_LANES;
   1537       else
   1538 	{
   1539 	  expressionS exp;
   1540 
   1541 	  my_get_expression (&exp, &str, GE_NO_PREFIX);
   1542 
   1543 	  if (exp.X_op != O_constant)
   1544 	    {
   1545 	      first_error (_("constant expression required"));
   1546 	      return FAIL;
   1547 	    }
   1548 
   1549 	  if (skip_past_char (&str, ']') == FAIL)
   1550 	    return FAIL;
   1551 
   1552 	  atype.index = exp.X_add_number;
   1553 	}
   1554     }
   1555 
   1556   if (typeinfo)
   1557     *typeinfo = atype;
   1558 
   1559   if (rtype)
   1560     *rtype = type;
   1561 
   1562   *ccp = str;
   1563 
   1564   return reg->number;
   1565 }
   1566 
    1567 /* Like arm_reg_parse, but also allow the following extra features:
    1568     - If RTYPE is non-NULL, return the (possibly restricted) type of the
    1569       register (e.g. Neon double or quad reg when either has been requested).
    1570     - If this is a Neon vector type with additional type information, fill
    1571       in the struct pointed to by VECTYPE (if non-NULL).
    1572    This function will fail on encountering a scalar.  */
   1573 
   1574 static int
   1575 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
   1576 		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
   1577 {
   1578   struct neon_typed_alias atype;
   1579   char *str = *ccp;
   1580   int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
   1581 
   1582   if (reg == FAIL)
   1583     return FAIL;
   1584 
   1585   /* Do not allow regname(... to parse as a register.  */
   1586   if (*str == '(')
   1587     return FAIL;
   1588 
   1589   /* Do not allow a scalar (reg+index) to parse as a register.  */
   1590   if ((atype.defined & NTA_HASINDEX) != 0)
   1591     {
   1592       first_error (_("register operand expected, but got scalar"));
   1593       return FAIL;
   1594     }
   1595 
   1596   if (vectype)
   1597     *vectype = atype.eltype;
   1598 
   1599   *ccp = str;
   1600 
   1601   return reg;
   1602 }
   1603 
   1604 #define NEON_SCALAR_REG(X)	((X) >> 4)
   1605 #define NEON_SCALAR_INDEX(X)	((X) & 15)
   1606 
   1607 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   1608    have enough information to be able to do a good job bounds-checking. So, we
   1609    just do easy checks here, and do further checks later.  */
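         /* For example, d5[2] is returned as 5 * 16 + 2 == 0x52; the NEON_SCALAR_REG
            and NEON_SCALAR_INDEX macros above recover the register number and the
            index respectively.  */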
   1610 
   1611 static int
   1612 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
   1613 {
   1614   int reg;
   1615   char *str = *ccp;
   1616   struct neon_typed_alias atype;
   1617 
   1618   reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
   1619 
   1620   if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
   1621     return FAIL;
   1622 
   1623   if (atype.index == NEON_ALL_LANES)
   1624     {
   1625       first_error (_("scalar must have an index"));
   1626       return FAIL;
   1627     }
   1628   else if (atype.index >= 64 / elsize)
   1629     {
   1630       first_error (_("scalar index out of range"));
   1631       return FAIL;
   1632     }
   1633 
   1634   if (type)
   1635     *type = atype.eltype;
   1636 
   1637   *ccp = str;
   1638 
   1639   return reg * 16 + atype.index;
   1640 }
   1641 
   1642 /* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
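         /* For example, "{r0, r2-r5}" yields the bitmask 0x0000003d (bit N set for
            register rN).  */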
   1643 
   1644 static long
   1645 parse_reg_list (char ** strp)
   1646 {
   1647   char * str = * strp;
   1648   long	 range = 0;
   1649   int	 another_range;
   1650 
   1651   /* We come back here if we get ranges concatenated by '+' or '|'.  */
   1652   do
   1653     {
   1654       skip_whitespace (str);
   1655 
   1656       another_range = 0;
   1657 
   1658       if (*str == '{')
   1659 	{
   1660 	  int in_range = 0;
   1661 	  int cur_reg = -1;
   1662 
   1663 	  str++;
   1664 	  do
   1665 	    {
   1666 	      int reg;
   1667 
   1668 	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
   1669 		{
   1670 		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
   1671 		  return FAIL;
   1672 		}
   1673 
   1674 	      if (in_range)
   1675 		{
   1676 		  int i;
   1677 
   1678 		  if (reg <= cur_reg)
   1679 		    {
   1680 		      first_error (_("bad range in register list"));
   1681 		      return FAIL;
   1682 		    }
   1683 
   1684 		  for (i = cur_reg + 1; i < reg; i++)
   1685 		    {
   1686 		      if (range & (1 << i))
   1687 			as_tsktsk
   1688 			  (_("Warning: duplicated register (r%d) in register list"),
   1689 			   i);
   1690 		      else
   1691 			range |= 1 << i;
   1692 		    }
   1693 		  in_range = 0;
   1694 		}
   1695 
   1696 	      if (range & (1 << reg))
   1697 		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
   1698 			   reg);
   1699 	      else if (reg <= cur_reg)
   1700 		as_tsktsk (_("Warning: register range not in ascending order"));
   1701 
   1702 	      range |= 1 << reg;
   1703 	      cur_reg = reg;
   1704 	    }
   1705 	  while (skip_past_comma (&str) != FAIL
   1706 		 || (in_range = 1, *str++ == '-'));
   1707 	  str--;
   1708 
   1709 	  if (skip_past_char (&str, '}') == FAIL)
   1710 	    {
   1711 	      first_error (_("missing `}'"));
   1712 	      return FAIL;
   1713 	    }
   1714 	}
   1715       else
   1716 	{
   1717 	  expressionS exp;
   1718 
   1719 	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
   1720 	    return FAIL;
   1721 
   1722 	  if (exp.X_op == O_constant)
   1723 	    {
   1724 	      if (exp.X_add_number
   1725 		  != (exp.X_add_number & 0x0000ffff))
   1726 		{
   1727 		  inst.error = _("invalid register mask");
   1728 		  return FAIL;
   1729 		}
   1730 
    1731 	      if ((range & exp.X_add_number) != 0)
    1732 		{
    1733 		  int regno = 0, dup = range & exp.X_add_number;
    1734 
    1735 		  for (dup &= -dup; dup > 1; dup >>= 1)
    1736 		    regno++;	/* Lowest duplicated register number.  */
    1737 		  as_tsktsk
    1738 		    (_("Warning: duplicated register (r%d) in register list"),
    1739 		     regno);
    1740 		}
   1741 
   1742 	      range |= exp.X_add_number;
   1743 	    }
   1744 	  else
   1745 	    {
   1746 	      if (inst.reloc.type != 0)
   1747 		{
   1748 		  inst.error = _("expression too complex");
   1749 		  return FAIL;
   1750 		}
   1751 
   1752 	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
   1753 	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
   1754 	      inst.reloc.pc_rel = 0;
   1755 	    }
   1756 	}
   1757 
   1758       if (*str == '|' || *str == '+')
   1759 	{
   1760 	  str++;
   1761 	  another_range = 1;
   1762 	}
   1763     }
   1764   while (another_range);
   1765 
   1766   *strp = str;
   1767   return range;
   1768 }
   1769 
   1770 /* Types of registers in a list.  */
   1771 
   1772 enum reg_list_els
   1773 {
   1774   REGLIST_VFP_S,
   1775   REGLIST_VFP_D,
   1776   REGLIST_NEON_D
   1777 };
   1778 
   1779 /* Parse a VFP register list.  If the string is invalid return FAIL.
   1780    Otherwise return the number of registers, and set PBASE to the first
   1781    register.  Parses registers of type ETYPE.
   1782    If REGLIST_NEON_D is used, several syntax enhancements are enabled:
   1783      - Q registers can be used to specify pairs of D registers
   1784      - { } can be omitted from around a singleton register list
   1785 	 FIXME: This is not implemented, as it would require backtracking in
   1786 	 some cases, e.g.:
   1787 	   vtbl.8 d3,d4,d5
   1788 	 This could be done (the meaning isn't really ambiguous), but doesn't
   1789 	 fit in well with the current parsing framework.
   1790      - 32 D registers may be used (also true for VFPv3).
   1791    FIXME: Types are ignored in these register lists, which is probably a
   1792    bug.  */
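         /* For example, "{s4-s7}" returns 4 with *PBASE == 4 for REGLIST_VFP_S, and
            "{q0, q1}" returns 4 with *PBASE == 0 (i.e. d0-d3) for REGLIST_NEON_D.  */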
   1793 
   1794 static int
   1795 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
   1796 {
   1797   char *str = *ccp;
   1798   int base_reg;
   1799   int new_base;
   1800   enum arm_reg_type regtype = (enum arm_reg_type) 0;
   1801   int max_regs = 0;
   1802   int count = 0;
   1803   int warned = 0;
   1804   unsigned long mask = 0;
   1805   int i;
   1806 
   1807   if (skip_past_char (&str, '{') == FAIL)
   1808     {
   1809       inst.error = _("expecting {");
   1810       return FAIL;
   1811     }
   1812 
   1813   switch (etype)
   1814     {
   1815     case REGLIST_VFP_S:
   1816       regtype = REG_TYPE_VFS;
   1817       max_regs = 32;
   1818       break;
   1819 
   1820     case REGLIST_VFP_D:
   1821       regtype = REG_TYPE_VFD;
   1822       break;
   1823 
   1824     case REGLIST_NEON_D:
   1825       regtype = REG_TYPE_NDQ;
   1826       break;
   1827     }
   1828 
   1829   if (etype != REGLIST_VFP_S)
   1830     {
   1831       /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
   1832       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
   1833 	{
   1834 	  max_regs = 32;
   1835 	  if (thumb_mode)
   1836 	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   1837 				    fpu_vfp_ext_d32);
   1838 	  else
   1839 	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   1840 				    fpu_vfp_ext_d32);
   1841 	}
   1842       else
   1843 	max_regs = 16;
   1844     }
   1845 
   1846   base_reg = max_regs;
   1847 
   1848   do
   1849     {
   1850       int setmask = 1, addregs = 1;
   1851 
   1852       new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
   1853 
   1854       if (new_base == FAIL)
   1855 	{
   1856 	  first_error (_(reg_expected_msgs[regtype]));
   1857 	  return FAIL;
   1858 	}
   1859 
   1860       if (new_base >= max_regs)
   1861 	{
   1862 	  first_error (_("register out of range in list"));
   1863 	  return FAIL;
   1864 	}
   1865 
   1866       /* Note: a value of 2 * n is returned for the register Q<n>.  */
   1867       if (regtype == REG_TYPE_NQ)
   1868 	{
   1869 	  setmask = 3;
   1870 	  addregs = 2;
   1871 	}
   1872 
   1873       if (new_base < base_reg)
   1874 	base_reg = new_base;
   1875 
   1876       if (mask & (setmask << new_base))
   1877 	{
   1878 	  first_error (_("invalid register list"));
   1879 	  return FAIL;
   1880 	}
   1881 
   1882       if ((mask >> new_base) != 0 && ! warned)
   1883 	{
   1884 	  as_tsktsk (_("register list not in ascending order"));
   1885 	  warned = 1;
   1886 	}
   1887 
   1888       mask |= setmask << new_base;
   1889       count += addregs;
   1890 
    1891       if (*str == '-') /* We have the start of a range expression.  */
   1892 	{
   1893 	  int high_range;
   1894 
   1895 	  str++;
   1896 
   1897 	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
   1898 	      == FAIL)
   1899 	    {
   1900 	      inst.error = gettext (reg_expected_msgs[regtype]);
   1901 	      return FAIL;
   1902 	    }
   1903 
   1904 	  if (high_range >= max_regs)
   1905 	    {
   1906 	      first_error (_("register out of range in list"));
   1907 	      return FAIL;
   1908 	    }
   1909 
   1910 	  if (regtype == REG_TYPE_NQ)
   1911 	    high_range = high_range + 1;
   1912 
   1913 	  if (high_range <= new_base)
   1914 	    {
   1915 	      inst.error = _("register range not in ascending order");
   1916 	      return FAIL;
   1917 	    }
   1918 
   1919 	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
   1920 	    {
   1921 	      if (mask & (setmask << new_base))
   1922 		{
   1923 		  inst.error = _("invalid register list");
   1924 		  return FAIL;
   1925 		}
   1926 
   1927 	      mask |= setmask << new_base;
   1928 	      count += addregs;
   1929 	    }
   1930 	}
   1931     }
   1932   while (skip_past_comma (&str) != FAIL);
   1933 
   1934   str++;
   1935 
   1936   /* Sanity check -- should have raised a parse error above.  */
   1937   if (count == 0 || count > max_regs)
   1938     abort ();
   1939 
   1940   *pbase = base_reg;
   1941 
   1942   /* Final test -- the registers must be consecutive.  */
   1943   mask >>= base_reg;
   1944   for (i = 0; i < count; i++)
   1945     {
   1946       if ((mask & (1u << i)) == 0)
   1947 	{
   1948 	  inst.error = _("non-contiguous register range");
   1949 	  return FAIL;
   1950 	}
   1951     }
   1952 
   1953   *ccp = str;
   1954 
   1955   return count;
   1956 }
   1957 
   1958 /* True if two alias types are the same.  */
   1959 
   1960 static bfd_boolean
   1961 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
   1962 {
   1963   if (!a && !b)
   1964     return TRUE;
   1965 
   1966   if (!a || !b)
   1967     return FALSE;
   1968 
   1969   if (a->defined != b->defined)
   1970     return FALSE;
   1971 
   1972   if ((a->defined & NTA_HASTYPE) != 0
   1973       && (a->eltype.type != b->eltype.type
   1974 	  || a->eltype.size != b->eltype.size))
   1975     return FALSE;
   1976 
   1977   if ((a->defined & NTA_HASINDEX) != 0
   1978       && (a->index != b->index))
   1979     return FALSE;
   1980 
   1981   return TRUE;
   1982 }
   1983 
   1984 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   1985    The base register is put in *PBASE.
   1986    The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   1987    the return value.
   1988    The register stride (minus one) is put in bit 4 of the return value.
   1989    Bits [6:5] encode the list length (minus one).
   1990    The type of the list elements is put in *ELTYPE, if non-NULL.  */
   1991 
   1992 #define NEON_LANE(X)		((X) & 0xf)
   1993 #define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
   1994 #define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
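         /* For example, "{d0[1], d2[1]}" (as accepted by VLD2 to a single lane)
            yields 0x31: lane 1, register stride 2, list length 2.  */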
   1995 
   1996 static int
   1997 parse_neon_el_struct_list (char **str, unsigned *pbase,
   1998 			   struct neon_type_el *eltype)
   1999 {
   2000   char *ptr = *str;
   2001   int base_reg = -1;
   2002   int reg_incr = -1;
   2003   int count = 0;
   2004   int lane = -1;
   2005   int leading_brace = 0;
   2006   enum arm_reg_type rtype = REG_TYPE_NDQ;
   2007   const char *const incr_error = _("register stride must be 1 or 2");
   2008   const char *const type_error = _("mismatched element/structure types in list");
   2009   struct neon_typed_alias firsttype;
   2010   firsttype.defined = 0;
   2011   firsttype.eltype.type = NT_invtype;
   2012   firsttype.eltype.size = -1;
   2013   firsttype.index = -1;
   2014 
   2015   if (skip_past_char (&ptr, '{') == SUCCESS)
   2016     leading_brace = 1;
   2017 
   2018   do
   2019     {
   2020       struct neon_typed_alias atype;
   2021       int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
   2022 
   2023       if (getreg == FAIL)
   2024 	{
   2025 	  first_error (_(reg_expected_msgs[rtype]));
   2026 	  return FAIL;
   2027 	}
   2028 
   2029       if (base_reg == -1)
   2030 	{
   2031 	  base_reg = getreg;
   2032 	  if (rtype == REG_TYPE_NQ)
   2033 	    {
   2034 	      reg_incr = 1;
   2035 	    }
   2036 	  firsttype = atype;
   2037 	}
   2038       else if (reg_incr == -1)
   2039 	{
   2040 	  reg_incr = getreg - base_reg;
   2041 	  if (reg_incr < 1 || reg_incr > 2)
   2042 	    {
   2043 	      first_error (_(incr_error));
   2044 	      return FAIL;
   2045 	    }
   2046 	}
   2047       else if (getreg != base_reg + reg_incr * count)
   2048 	{
   2049 	  first_error (_(incr_error));
   2050 	  return FAIL;
   2051 	}
   2052 
   2053       if (! neon_alias_types_same (&atype, &firsttype))
   2054 	{
   2055 	  first_error (_(type_error));
   2056 	  return FAIL;
   2057 	}
   2058 
   2059       /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
   2060 	 modes.  */
   2061       if (ptr[0] == '-')
   2062 	{
   2063 	  struct neon_typed_alias htype;
   2064 	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
   2065 	  if (lane == -1)
   2066 	    lane = NEON_INTERLEAVE_LANES;
   2067 	  else if (lane != NEON_INTERLEAVE_LANES)
   2068 	    {
   2069 	      first_error (_(type_error));
   2070 	      return FAIL;
   2071 	    }
   2072 	  if (reg_incr == -1)
   2073 	    reg_incr = 1;
   2074 	  else if (reg_incr != 1)
   2075 	    {
   2076 	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
   2077 	      return FAIL;
   2078 	    }
   2079 	  ptr++;
   2080 	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
   2081 	  if (hireg == FAIL)
   2082 	    {
   2083 	      first_error (_(reg_expected_msgs[rtype]));
   2084 	      return FAIL;
   2085 	    }
   2086 	  if (! neon_alias_types_same (&htype, &firsttype))
   2087 	    {
   2088 	      first_error (_(type_error));
   2089 	      return FAIL;
   2090 	    }
   2091 	  count += hireg + dregs - getreg;
   2092 	  continue;
   2093 	}
   2094 
   2095       /* If we're using Q registers, we can't use [] or [n] syntax.  */
   2096       if (rtype == REG_TYPE_NQ)
   2097 	{
   2098 	  count += 2;
   2099 	  continue;
   2100 	}
   2101 
   2102       if ((atype.defined & NTA_HASINDEX) != 0)
   2103 	{
   2104 	  if (lane == -1)
   2105 	    lane = atype.index;
   2106 	  else if (lane != atype.index)
   2107 	    {
   2108 	      first_error (_(type_error));
   2109 	      return FAIL;
   2110 	    }
   2111 	}
   2112       else if (lane == -1)
   2113 	lane = NEON_INTERLEAVE_LANES;
   2114       else if (lane != NEON_INTERLEAVE_LANES)
   2115 	{
   2116 	  first_error (_(type_error));
   2117 	  return FAIL;
   2118 	}
   2119       count++;
   2120     }
   2121   while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
   2122 
   2123   /* No lane set by [x]. We must be interleaving structures.  */
   2124   if (lane == -1)
   2125     lane = NEON_INTERLEAVE_LANES;
   2126 
   2127   /* Sanity check.  */
   2128   if (lane == -1 || base_reg == -1 || count < 1 || count > 4
   2129       || (count > 1 && reg_incr == -1))
   2130     {
   2131       first_error (_("error parsing element/structure list"));
   2132       return FAIL;
   2133     }
   2134 
   2135   if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
   2136     {
   2137       first_error (_("expected }"));
   2138       return FAIL;
   2139     }
   2140 
   2141   if (reg_incr == -1)
   2142     reg_incr = 1;
   2143 
   2144   if (eltype)
   2145     *eltype = firsttype.eltype;
   2146 
   2147   *pbase = base_reg;
   2148   *str = ptr;
   2149 
   2150   return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
   2151 }
   2152 
   2153 /* Parse an explicit relocation suffix on an expression.  This is
   2154    either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   2155    arm_reloc_hsh contains no entries, so this function can only
   2156    succeed if there is no () after the word.  Returns -1 on error,
   2157    BFD_RELOC_UNUSED if there wasn't any suffix.	 */
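         /* For example, on ELF targets an expression such as "sym(gotoff)" selects
            the GOTOFF relocation; the recognised suffix names come from the
            reloc_names[] table used to build arm_reloc_hsh elsewhere in this file.  */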
   2158 
   2159 static int
   2160 parse_reloc (char **str)
   2161 {
   2162   struct reloc_entry *r;
   2163   char *p, *q;
   2164 
   2165   if (**str != '(')
   2166     return BFD_RELOC_UNUSED;
   2167 
   2168   p = *str + 1;
   2169   q = p;
   2170 
   2171   while (*q && *q != ')' && *q != ',')
   2172     q++;
   2173   if (*q != ')')
   2174     return -1;
   2175 
   2176   if ((r = (struct reloc_entry *)
   2177        hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
   2178     return -1;
   2179 
   2180   *str = q + 1;
   2181   return r->reloc;
   2182 }
   2183 
   2184 /* Directives: register aliases.  */
   2185 
   2186 static struct reg_entry *
   2187 insert_reg_alias (char *str, unsigned number, int type)
   2188 {
   2189   struct reg_entry *new_reg;
   2190   const char *name;
   2191 
   2192   if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
   2193     {
   2194       if (new_reg->builtin)
   2195 	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
   2196 
   2197       /* Only warn about a redefinition if it's not defined as the
   2198 	 same register.	 */
   2199       else if (new_reg->number != number || new_reg->type != type)
   2200 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
   2201 
   2202       return NULL;
   2203     }
   2204 
   2205   name = xstrdup (str);
   2206   new_reg = XNEW (struct reg_entry);
   2207 
   2208   new_reg->name = name;
   2209   new_reg->number = number;
   2210   new_reg->type = type;
   2211   new_reg->builtin = FALSE;
   2212   new_reg->neon = NULL;
   2213 
   2214   if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
   2215     abort ();
   2216 
   2217   return new_reg;
   2218 }
   2219 
   2220 static void
   2221 insert_neon_reg_alias (char *str, int number, int type,
   2222 		       struct neon_typed_alias *atype)
   2223 {
   2224   struct reg_entry *reg = insert_reg_alias (str, number, type);
   2225 
   2226   if (!reg)
   2227     {
   2228       first_error (_("attempt to redefine typed alias"));
   2229       return;
   2230     }
   2231 
   2232   if (atype)
   2233     {
   2234       reg->neon = XNEW (struct neon_typed_alias);
   2235       *reg->neon = *atype;
   2236     }
   2237 }
   2238 
   2239 /* Look for the .req directive.	 This is of the form:
   2240 
   2241 	new_register_name .req existing_register_name
   2242 
   2243    If we find one, or if it looks sufficiently like one that we want to
   2244    handle any error here, return TRUE.  Otherwise return FALSE.  */
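         /* For example, after "acc .req r0", both "acc" and "ACC" may be used
            wherever r0 is expected.  */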
   2245 
   2246 static bfd_boolean
   2247 create_register_alias (char * newname, char *p)
   2248 {
   2249   struct reg_entry *old;
   2250   char *oldname, *nbuf;
   2251   size_t nlen;
   2252 
   2253   /* The input scrubber ensures that whitespace after the mnemonic is
   2254      collapsed to single spaces.  */
   2255   oldname = p;
   2256   if (strncmp (oldname, " .req ", 6) != 0)
   2257     return FALSE;
   2258 
   2259   oldname += 6;
   2260   if (*oldname == '\0')
   2261     return FALSE;
   2262 
   2263   old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
   2264   if (!old)
   2265     {
   2266       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
   2267       return TRUE;
   2268     }
   2269 
   2270   /* If TC_CASE_SENSITIVE is defined, then newname already points to
   2271      the desired alias name, and p points to its end.  If not, then
   2272      the desired alias name is in the global original_case_string.  */
   2273 #ifdef TC_CASE_SENSITIVE
   2274   nlen = p - newname;
   2275 #else
   2276   newname = original_case_string;
   2277   nlen = strlen (newname);
   2278 #endif
   2279 
   2280   nbuf = xmemdup0 (newname, nlen);
   2281 
   2282   /* Create aliases under the new name as stated; an all-lowercase
   2283      version of the new name; and an all-uppercase version of the new
   2284      name.  */
   2285   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
   2286     {
   2287       for (p = nbuf; *p; p++)
   2288 	*p = TOUPPER (*p);
   2289 
   2290       if (strncmp (nbuf, newname, nlen))
   2291 	{
   2292 	  /* If this attempt to create an additional alias fails, do not bother
   2293 	     trying to create the all-lower case alias.  We will fail and issue
   2294 	     a second, duplicate error message.  This situation arises when the
   2295 	     programmer does something like:
   2296 	       foo .req r0
   2297 	       Foo .req r1
   2298 	     The second .req creates the "Foo" alias but then fails to create
   2299 	     the artificial FOO alias because it has already been created by the
   2300 	     first .req.  */
   2301 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
   2302 	    {
   2303 	      free (nbuf);
   2304 	      return TRUE;
   2305 	    }
   2306 	}
   2307 
   2308       for (p = nbuf; *p; p++)
   2309 	*p = TOLOWER (*p);
   2310 
   2311       if (strncmp (nbuf, newname, nlen))
   2312 	insert_reg_alias (nbuf, old->number, old->type);
   2313     }
   2314 
   2315   free (nbuf);
   2316   return TRUE;
   2317 }
   2318 
   2319 /* Create a Neon typed/indexed register alias using directives, e.g.:
   2320      X .dn d5.s32[1]
   2321      Y .qn 6.s16
   2322      Z .dn d7
   2323      T .dn Z[0]
   2324    These typed registers can be used instead of the types specified after the
   2325    Neon mnemonic, so long as all operands given have types. Types can also be
   2326    specified directly, e.g.:
   2327      vadd d0.s32, d1.s32, d2.s32  */
   2328 
   2329 static bfd_boolean
   2330 create_neon_reg_alias (char *newname, char *p)
   2331 {
   2332   enum arm_reg_type basetype;
   2333   struct reg_entry *basereg;
   2334   struct reg_entry mybasereg;
   2335   struct neon_type ntype;
   2336   struct neon_typed_alias typeinfo;
   2337   char *namebuf, *nameend ATTRIBUTE_UNUSED;
   2338   int namelen;
   2339 
   2340   typeinfo.defined = 0;
   2341   typeinfo.eltype.type = NT_invtype;
   2342   typeinfo.eltype.size = -1;
   2343   typeinfo.index = -1;
   2344 
   2345   nameend = p;
   2346 
   2347   if (strncmp (p, " .dn ", 5) == 0)
   2348     basetype = REG_TYPE_VFD;
   2349   else if (strncmp (p, " .qn ", 5) == 0)
   2350     basetype = REG_TYPE_NQ;
   2351   else
   2352     return FALSE;
   2353 
   2354   p += 5;
   2355 
   2356   if (*p == '\0')
   2357     return FALSE;
   2358 
   2359   basereg = arm_reg_parse_multi (&p);
   2360 
   2361   if (basereg && basereg->type != basetype)
   2362     {
   2363       as_bad (_("bad type for register"));
   2364       return FALSE;
   2365     }
   2366 
   2367   if (basereg == NULL)
   2368     {
   2369       expressionS exp;
   2370       /* Try parsing as an integer.  */
   2371       my_get_expression (&exp, &p, GE_NO_PREFIX);
   2372       if (exp.X_op != O_constant)
   2373 	{
   2374 	  as_bad (_("expression must be constant"));
   2375 	  return FALSE;
   2376 	}
   2377       basereg = &mybasereg;
   2378       basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
   2379 						  : exp.X_add_number;
   2380       basereg->neon = 0;
   2381     }
   2382 
   2383   if (basereg->neon)
   2384     typeinfo = *basereg->neon;
   2385 
   2386   if (parse_neon_type (&ntype, &p) == SUCCESS)
   2387     {
   2388       /* We got a type.  */
   2389       if (typeinfo.defined & NTA_HASTYPE)
   2390 	{
   2391 	  as_bad (_("can't redefine the type of a register alias"));
   2392 	  return FALSE;
   2393 	}
   2394 
   2395       typeinfo.defined |= NTA_HASTYPE;
   2396       if (ntype.elems != 1)
   2397 	{
   2398 	  as_bad (_("you must specify a single type only"));
   2399 	  return FALSE;
   2400 	}
   2401       typeinfo.eltype = ntype.el[0];
   2402     }
   2403 
   2404   if (skip_past_char (&p, '[') == SUCCESS)
   2405     {
   2406       expressionS exp;
   2407       /* We got a scalar index.  */
   2408 
   2409       if (typeinfo.defined & NTA_HASINDEX)
   2410 	{
   2411 	  as_bad (_("can't redefine the index of a scalar alias"));
   2412 	  return FALSE;
   2413 	}
   2414 
   2415       my_get_expression (&exp, &p, GE_NO_PREFIX);
   2416 
   2417       if (exp.X_op != O_constant)
   2418 	{
   2419 	  as_bad (_("scalar index must be constant"));
   2420 	  return FALSE;
   2421 	}
   2422 
   2423       typeinfo.defined |= NTA_HASINDEX;
   2424       typeinfo.index = exp.X_add_number;
   2425 
   2426       if (skip_past_char (&p, ']') == FAIL)
   2427 	{
   2428 	  as_bad (_("expecting ]"));
   2429 	  return FALSE;
   2430 	}
   2431     }
   2432 
   2433   /* If TC_CASE_SENSITIVE is defined, then newname already points to
   2434      the desired alias name, and p points to its end.  If not, then
   2435      the desired alias name is in the global original_case_string.  */
   2436 #ifdef TC_CASE_SENSITIVE
   2437   namelen = nameend - newname;
   2438 #else
   2439   newname = original_case_string;
   2440   namelen = strlen (newname);
   2441 #endif
   2442 
   2443   namebuf = xmemdup0 (newname, namelen);
   2444 
   2445   insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2446 			 typeinfo.defined != 0 ? &typeinfo : NULL);
   2447 
   2448   /* Insert name in all uppercase.  */
   2449   for (p = namebuf; *p; p++)
   2450     *p = TOUPPER (*p);
   2451 
   2452   if (strncmp (namebuf, newname, namelen))
   2453     insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2454 			   typeinfo.defined != 0 ? &typeinfo : NULL);
   2455 
   2456   /* Insert name in all lowercase.  */
   2457   for (p = namebuf; *p; p++)
   2458     *p = TOLOWER (*p);
   2459 
   2460   if (strncmp (namebuf, newname, namelen))
   2461     insert_neon_reg_alias (namebuf, basereg->number, basetype,
   2462 			   typeinfo.defined != 0 ? &typeinfo : NULL);
   2463 
   2464   free (namebuf);
   2465   return TRUE;
   2466 }
   2467 
   2468 /* Should never be called, as .req goes between the alias and the
   2469    register name, not at the beginning of the line.  */
   2470 
   2471 static void
   2472 s_req (int a ATTRIBUTE_UNUSED)
   2473 {
   2474   as_bad (_("invalid syntax for .req directive"));
   2475 }
   2476 
   2477 static void
   2478 s_dn (int a ATTRIBUTE_UNUSED)
   2479 {
   2480   as_bad (_("invalid syntax for .dn directive"));
   2481 }
   2482 
   2483 static void
   2484 s_qn (int a ATTRIBUTE_UNUSED)
   2485 {
   2486   as_bad (_("invalid syntax for .qn directive"));
   2487 }
   2488 
   2489 /* The .unreq directive deletes an alias which was previously defined
   2490    by .req.  For example:
   2491 
   2492        my_alias .req r11
   2493        .unreq my_alias	  */
   2494 
   2495 static void
   2496 s_unreq (int a ATTRIBUTE_UNUSED)
   2497 {
   2498   char * name;
   2499   char saved_char;
   2500 
   2501   name = input_line_pointer;
   2502 
   2503   while (*input_line_pointer != 0
   2504 	 && *input_line_pointer != ' '
   2505 	 && *input_line_pointer != '\n')
   2506     ++input_line_pointer;
   2507 
   2508   saved_char = *input_line_pointer;
   2509   *input_line_pointer = 0;
   2510 
   2511   if (!*name)
   2512     as_bad (_("invalid syntax for .unreq directive"));
   2513   else
   2514     {
   2515       struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
   2516 							      name);
   2517 
   2518       if (!reg)
   2519 	as_bad (_("unknown register alias '%s'"), name);
   2520       else if (reg->builtin)
   2521 	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
   2522 		 name);
   2523       else
   2524 	{
   2525 	  char * p;
   2526 	  char * nbuf;
   2527 
   2528 	  hash_delete (arm_reg_hsh, name, FALSE);
   2529 	  free ((char *) reg->name);
   2530 	  if (reg->neon)
   2531 	    free (reg->neon);
   2532 	  free (reg);
   2533 
   2534 	  /* Also locate the all upper case and all lower case versions.
   2535 	     Do not complain if we cannot find one or the other as it
   2536 	     was probably deleted above.  */
   2537 
   2538 	  nbuf = strdup (name);
   2539 	  for (p = nbuf; *p; p++)
   2540 	    *p = TOUPPER (*p);
   2541 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
   2542 	  if (reg)
   2543 	    {
   2544 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
   2545 	      free ((char *) reg->name);
   2546 	      if (reg->neon)
   2547 		free (reg->neon);
   2548 	      free (reg);
   2549 	    }
   2550 
   2551 	  for (p = nbuf; *p; p++)
   2552 	    *p = TOLOWER (*p);
   2553 	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
   2554 	  if (reg)
   2555 	    {
   2556 	      hash_delete (arm_reg_hsh, nbuf, FALSE);
   2557 	      free ((char *) reg->name);
   2558 	      if (reg->neon)
   2559 		free (reg->neon);
   2560 	      free (reg);
   2561 	    }
   2562 
   2563 	  free (nbuf);
   2564 	}
   2565     }
   2566 
   2567   *input_line_pointer = saved_char;
   2568   demand_empty_rest_of_line ();
   2569 }
   2570 
   2571 /* Directives: Instruction set selection.  */
   2572 
   2573 #ifdef OBJ_ELF
   2574 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
   2575    (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
    2576    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
    2577    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
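         /* For example, a section that starts with ARM code and ends with a literal
            pool will carry a "$a" mapping symbol at offset 0 and a "$d" mapping
            symbol at the start of the pool data.  */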
   2578 
   2579 /* Create a new mapping symbol for the transition to STATE.  */
   2580 
   2581 static void
   2582 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
   2583 {
   2584   symbolS * symbolP;
   2585   const char * symname;
   2586   int type;
   2587 
   2588   switch (state)
   2589     {
   2590     case MAP_DATA:
   2591       symname = "$d";
   2592       type = BSF_NO_FLAGS;
   2593       break;
   2594     case MAP_ARM:
   2595       symname = "$a";
   2596       type = BSF_NO_FLAGS;
   2597       break;
   2598     case MAP_THUMB:
   2599       symname = "$t";
   2600       type = BSF_NO_FLAGS;
   2601       break;
   2602     default:
   2603       abort ();
   2604     }
   2605 
   2606   symbolP = symbol_new (symname, now_seg, value, frag);
   2607   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
   2608 
   2609   switch (state)
   2610     {
   2611     case MAP_ARM:
   2612       THUMB_SET_FUNC (symbolP, 0);
   2613       ARM_SET_THUMB (symbolP, 0);
   2614       ARM_SET_INTERWORK (symbolP, support_interwork);
   2615       break;
   2616 
   2617     case MAP_THUMB:
   2618       THUMB_SET_FUNC (symbolP, 1);
   2619       ARM_SET_THUMB (symbolP, 1);
   2620       ARM_SET_INTERWORK (symbolP, support_interwork);
   2621       break;
   2622 
   2623     case MAP_DATA:
   2624     default:
   2625       break;
   2626     }
   2627 
   2628   /* Save the mapping symbols for future reference.  Also check that
   2629      we do not place two mapping symbols at the same offset within a
   2630      frag.  We'll handle overlap between frags in
   2631      check_mapping_symbols.
   2632 
   2633      If .fill or other data filling directive generates zero sized data,
   2634      the mapping symbol for the following code will have the same value
   2635      as the one generated for the data filling directive.  In this case,
   2636      we replace the old symbol with the new one at the same address.  */
   2637   if (value == 0)
   2638     {
   2639       if (frag->tc_frag_data.first_map != NULL)
   2640 	{
   2641 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
   2642 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
   2643 	}
   2644       frag->tc_frag_data.first_map = symbolP;
   2645     }
   2646   if (frag->tc_frag_data.last_map != NULL)
   2647     {
   2648       know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
   2649       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
   2650 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
   2651     }
   2652   frag->tc_frag_data.last_map = symbolP;
   2653 }
   2654 
   2655 /* We must sometimes convert a region marked as code to data during
   2656    code alignment, if an odd number of bytes have to be padded.  The
   2657    code mapping symbol is pushed to an aligned address.  */
   2658 
   2659 static void
   2660 insert_data_mapping_symbol (enum mstate state,
   2661 			    valueT value, fragS *frag, offsetT bytes)
   2662 {
   2663   /* If there was already a mapping symbol, remove it.  */
   2664   if (frag->tc_frag_data.last_map != NULL
   2665       && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
   2666     {
   2667       symbolS *symp = frag->tc_frag_data.last_map;
   2668 
   2669       if (value == 0)
   2670 	{
   2671 	  know (frag->tc_frag_data.first_map == symp);
   2672 	  frag->tc_frag_data.first_map = NULL;
   2673 	}
   2674       frag->tc_frag_data.last_map = NULL;
   2675       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
   2676     }
   2677 
   2678   make_mapping_symbol (MAP_DATA, value, frag);
   2679   make_mapping_symbol (state, value + bytes, frag);
   2680 }
   2681 
   2682 static void mapping_state_2 (enum mstate state, int max_chars);
   2683 
   2684 /* Set the mapping state to STATE.  Only call this when about to
   2685    emit some STATE bytes to the file.  */
   2686 
   2687 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
   2688 void
   2689 mapping_state (enum mstate state)
   2690 {
   2691   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
   2692 
   2693   if (mapstate == state)
   2694     /* The mapping symbol has already been emitted.
   2695        There is nothing else to do.  */
   2696     return;
   2697 
   2698   if (state == MAP_ARM || state == MAP_THUMB)
   2699     /*  PR gas/12931
   2700 	All ARM instructions require 4-byte alignment.
   2701 	(Almost) all Thumb instructions require 2-byte alignment.
   2702 
   2703 	When emitting instructions into any section, mark the section
   2704 	appropriately.
   2705 
   2706 	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
   2707 	but themselves require 2-byte alignment; this applies to some
    2708 	PC-relative forms.  However, these cases will involve implicit
    2709 	literal pool generation or an explicit .align >=2, both of
    2710 	which will cause the section to be marked with sufficient
   2711 	alignment.  Thus, we don't handle those cases here.  */
   2712     record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
   2713 
   2714   if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
   2715     /* This case will be evaluated later.  */
   2716     return;
   2717 
   2718   mapping_state_2 (state, 0);
   2719 }
   2720 
   2721 /* Same as mapping_state, but MAX_CHARS bytes have already been
   2722    allocated.  Put the mapping symbol that far back.  */
   2723 
   2724 static void
   2725 mapping_state_2 (enum mstate state, int max_chars)
   2726 {
   2727   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
   2728 
   2729   if (!SEG_NORMAL (now_seg))
   2730     return;
   2731 
   2732   if (mapstate == state)
   2733     /* The mapping symbol has already been emitted.
   2734        There is nothing else to do.  */
   2735     return;
   2736 
   2737   if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
   2738 	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
   2739     {
   2740       struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
   2741       const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
   2742 
   2743       if (add_symbol)
   2744 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
   2745     }
   2746 
   2747   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
   2748   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
   2749 }
   2750 #undef TRANSITION
   2751 #else
   2752 #define mapping_state(x) ((void)0)
   2753 #define mapping_state_2(x, y) ((void)0)
   2754 #endif
   2755 
   2756 /* Find the real, Thumb encoded start of a Thumb function.  */
   2757 
   2758 #ifdef OBJ_COFF
   2759 static symbolS *
   2760 find_real_start (symbolS * symbolP)
   2761 {
   2762   char *       real_start;
   2763   const char * name = S_GET_NAME (symbolP);
   2764   symbolS *    new_target;
   2765 
   2766   /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
   2767 #define STUB_NAME ".real_start_of"
   2768 
   2769   if (name == NULL)
   2770     abort ();
   2771 
   2772   /* The compiler may generate BL instructions to local labels because
   2773      it needs to perform a branch to a far away location. These labels
   2774      do not have a corresponding ".real_start_of" label.  We check
   2775      both for S_IS_LOCAL and for a leading dot, to give a way to bypass
   2776      the ".real_start_of" convention for nonlocal branches.  */
   2777   if (S_IS_LOCAL (symbolP) || name[0] == '.')
   2778     return symbolP;
   2779 
   2780   real_start = concat (STUB_NAME, name, NULL);
   2781   new_target = symbol_find (real_start);
   2782   free (real_start);
   2783 
   2784   if (new_target == NULL)
   2785     {
   2786       as_warn (_("Failed to find real start of function: %s\n"), name);
   2787       new_target = symbolP;
   2788     }
   2789 
   2790   return new_target;
   2791 }
   2792 #endif
   2793 
   2794 static void
   2795 opcode_select (int width)
   2796 {
   2797   switch (width)
   2798     {
   2799     case 16:
   2800       if (! thumb_mode)
   2801 	{
   2802 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
   2803 	    as_bad (_("selected processor does not support THUMB opcodes"));
   2804 
   2805 	  thumb_mode = 1;
   2806 	  /* No need to force the alignment, since we will have been
   2807 	     coming from ARM mode, which is word-aligned.  */
   2808 	  record_alignment (now_seg, 1);
   2809 	}
   2810       break;
   2811 
   2812     case 32:
   2813       if (thumb_mode)
   2814 	{
   2815 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
   2816 	    as_bad (_("selected processor does not support ARM opcodes"));
   2817 
   2818 	  thumb_mode = 0;
   2819 
   2820 	  if (!need_pass_2)
   2821 	    frag_align (2, 0, 0);
   2822 
   2823 	  record_alignment (now_seg, 1);
   2824 	}
   2825       break;
   2826 
   2827     default:
   2828       as_bad (_("invalid instruction size selected (%d)"), width);
   2829     }
   2830 }
   2831 
   2832 static void
   2833 s_arm (int ignore ATTRIBUTE_UNUSED)
   2834 {
   2835   opcode_select (32);
   2836   demand_empty_rest_of_line ();
   2837 }
   2838 
   2839 static void
   2840 s_thumb (int ignore ATTRIBUTE_UNUSED)
   2841 {
   2842   opcode_select (16);
   2843   demand_empty_rest_of_line ();
   2844 }
   2845 
   2846 static void
   2847 s_code (int unused ATTRIBUTE_UNUSED)
   2848 {
   2849   int temp;
   2850 
   2851   temp = get_absolute_expression ();
   2852   switch (temp)
   2853     {
   2854     case 16:
   2855     case 32:
   2856       opcode_select (temp);
   2857       break;
   2858 
   2859     default:
   2860       as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
   2861     }
   2862 }
   2863 
   2864 static void
   2865 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
   2866 {
   2867   /* If we are not already in thumb mode go into it, EVEN if
   2868      the target processor does not support thumb instructions.
   2869      This is used by gcc/config/arm/lib1funcs.asm for example
   2870      to compile interworking support functions even if the
   2871      target processor should not support interworking.	*/
   2872   if (! thumb_mode)
   2873     {
   2874       thumb_mode = 2;
   2875       record_alignment (now_seg, 1);
   2876     }
   2877 
   2878   demand_empty_rest_of_line ();
   2879 }
   2880 
   2881 static void
   2882 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
   2883 {
   2884   s_thumb (0);
   2885 
   2886   /* The following label is the name/address of the start of a Thumb function.
   2887      We need to know this for the interworking support.	 */
   2888   label_is_thumb_function_name = TRUE;
   2889 }
   2890 
   2891 /* Perform a .set directive, but also mark the alias as
   2892    being a thumb function.  */
   2893 
   2894 static void
   2895 s_thumb_set (int equiv)
   2896 {
   2897   /* XXX the following is a duplicate of the code for s_set() in read.c
   2898      We cannot just call that code as we need to get at the symbol that
   2899      is created.  */
   2900   char *    name;
   2901   char	    delim;
   2902   char *    end_name;
   2903   symbolS * symbolP;
   2904 
   2905   /* Especial apologies for the random logic:
   2906      This just grew, and could be parsed much more simply!
   2907      Dean - in haste.  */
   2908   delim	    = get_symbol_name (& name);
   2909   end_name  = input_line_pointer;
   2910   (void) restore_line_pointer (delim);
   2911 
   2912   if (*input_line_pointer != ',')
   2913     {
   2914       *end_name = 0;
   2915       as_bad (_("expected comma after name \"%s\""), name);
   2916       *end_name = delim;
   2917       ignore_rest_of_line ();
   2918       return;
   2919     }
   2920 
   2921   input_line_pointer++;
   2922   *end_name = 0;
   2923 
   2924   if (name[0] == '.' && name[1] == '\0')
   2925     {
   2926       /* XXX - this should not happen to .thumb_set.  */
   2927       abort ();
   2928     }
   2929 
   2930   if ((symbolP = symbol_find (name)) == NULL
   2931       && (symbolP = md_undefined_symbol (name)) == NULL)
   2932     {
   2933 #ifndef NO_LISTING
   2934       /* When doing symbol listings, play games with dummy fragments living
   2935 	 outside the normal fragment chain to record the file and line info
   2936 	 for this symbol.  */
   2937       if (listing & LISTING_SYMBOLS)
   2938 	{
   2939 	  extern struct list_info_struct * listing_tail;
   2940 	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
   2941 
   2942 	  memset (dummy_frag, 0, sizeof (fragS));
   2943 	  dummy_frag->fr_type = rs_fill;
   2944 	  dummy_frag->line = listing_tail;
   2945 	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
   2946 	  dummy_frag->fr_symbol = symbolP;
   2947 	}
   2948       else
   2949 #endif
   2950 	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
   2951 
   2952 #ifdef OBJ_COFF
   2953       /* "set" symbols are local unless otherwise specified.  */
   2954       SF_SET_LOCAL (symbolP);
   2955 #endif /* OBJ_COFF  */
   2956     }				/* Make a new symbol.  */
   2957 
   2958   symbol_table_insert (symbolP);
   2959 
   2960   * end_name = delim;
   2961 
   2962   if (equiv
   2963       && S_IS_DEFINED (symbolP)
   2964       && S_GET_SEGMENT (symbolP) != reg_section)
   2965     as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
   2966 
   2967   pseudo_set (symbolP);
   2968 
   2969   demand_empty_rest_of_line ();
   2970 
   2971   /* XXX Now we come to the Thumb specific bit of code.	 */
   2972 
   2973   THUMB_SET_FUNC (symbolP, 1);
   2974   ARM_SET_THUMB (symbolP, 1);
   2975 #if defined OBJ_ELF || defined OBJ_COFF
   2976   ARM_SET_INTERWORK (symbolP, support_interwork);
   2977 #endif
   2978 }
   2979 
   2980 /* Directives: Mode selection.  */
   2981 
   2982 /* .syntax [unified|divided] - choose the new unified syntax
   2983    (same for Arm and Thumb encoding, modulo slight differences in what
   2984    can be represented) or the old divergent syntax for each mode.  */
   2985 static void
   2986 s_syntax (int unused ATTRIBUTE_UNUSED)
   2987 {
   2988   char *name, delim;
   2989 
   2990   delim = get_symbol_name (& name);
   2991 
   2992   if (!strcasecmp (name, "unified"))
   2993     unified_syntax = TRUE;
   2994   else if (!strcasecmp (name, "divided"))
   2995     unified_syntax = FALSE;
   2996   else
   2997     {
   2998       as_bad (_("unrecognized syntax mode \"%s\""), name);
   2999       return;
   3000     }
   3001   (void) restore_line_pointer (delim);
   3002   demand_empty_rest_of_line ();
   3003 }
   3004 
   3005 /* Directives: sectioning and alignment.  */
   3006 
   3007 static void
   3008 s_bss (int ignore ATTRIBUTE_UNUSED)
   3009 {
    3010   /* We don't support putting frags in the BSS segment; we fake it by
    3011      marking in_bss, then looking at s_skip for clues.  */
   3012   subseg_set (bss_section, 0);
   3013   demand_empty_rest_of_line ();
   3014 
   3015 #ifdef md_elf_section_change_hook
   3016   md_elf_section_change_hook ();
   3017 #endif
   3018 }
   3019 
   3020 static void
   3021 s_even (int ignore ATTRIBUTE_UNUSED)
   3022 {
    3023   /* Never make a frag if we expect an extra pass.  */
   3024   if (!need_pass_2)
   3025     frag_align (1, 0, 0);
   3026 
   3027   record_alignment (now_seg, 1);
   3028 
   3029   demand_empty_rest_of_line ();
   3030 }
   3031 
   3032 /* Directives: CodeComposer Studio.  */
   3033 
   3034 /*  .ref  (for CodeComposer Studio syntax only).  */
   3035 static void
   3036 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
   3037 {
   3038   if (codecomposer_syntax)
   3039     ignore_rest_of_line ();
   3040   else
   3041     as_bad (_(".ref pseudo-op only available with -mccs flag."));
   3042 }
   3043 
   3044 /*  If name is not NULL, then it is used for marking the beginning of a
    3045     function, whereas if it is NULL then it marks the end of the function.  */
   3046 static void
   3047 asmfunc_debug (const char * name)
   3048 {
   3049   static const char * last_name = NULL;
   3050 
   3051   if (name != NULL)
   3052     {
   3053       gas_assert (last_name == NULL);
   3054       last_name = name;
   3055 
   3056       if (debug_type == DEBUG_STABS)
   3057          stabs_generate_asm_func (name, name);
   3058     }
   3059   else
   3060     {
   3061       gas_assert (last_name != NULL);
   3062 
   3063       if (debug_type == DEBUG_STABS)
   3064         stabs_generate_asm_endfunc (last_name, last_name);
   3065 
   3066       last_name = NULL;
   3067     }
   3068 }
   3069 
   3070 static void
   3071 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
   3072 {
   3073   if (codecomposer_syntax)
   3074     {
   3075       switch (asmfunc_state)
   3076 	{
   3077 	case OUTSIDE_ASMFUNC:
   3078 	  asmfunc_state = WAITING_ASMFUNC_NAME;
   3079 	  break;
   3080 
   3081 	case WAITING_ASMFUNC_NAME:
   3082 	  as_bad (_(".asmfunc repeated."));
   3083 	  break;
   3084 
   3085 	case WAITING_ENDASMFUNC:
   3086 	  as_bad (_(".asmfunc without function."));
   3087 	  break;
   3088 	}
   3089       demand_empty_rest_of_line ();
   3090     }
   3091   else
   3092     as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
   3093 }
   3094 
   3095 static void
   3096 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
   3097 {
   3098   if (codecomposer_syntax)
   3099     {
   3100       switch (asmfunc_state)
   3101 	{
   3102 	case OUTSIDE_ASMFUNC:
   3103 	  as_bad (_(".endasmfunc without a .asmfunc."));
   3104 	  break;
   3105 
   3106 	case WAITING_ASMFUNC_NAME:
   3107 	  as_bad (_(".endasmfunc without function."));
   3108 	  break;
   3109 
   3110 	case WAITING_ENDASMFUNC:
   3111 	  asmfunc_state = OUTSIDE_ASMFUNC;
   3112 	  asmfunc_debug (NULL);
   3113 	  break;
   3114 	}
   3115       demand_empty_rest_of_line ();
   3116     }
   3117   else
   3118     as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
   3119 }
   3120 
   3121 static void
   3122 s_ccs_def (int name)
   3123 {
   3124   if (codecomposer_syntax)
   3125     s_globl (name);
   3126   else
   3127     as_bad (_(".def pseudo-op only available with -mccs flag."));
   3128 }
   3129 
   3130 /* Directives: Literal pools.  */
   3131 
   3132 static literal_pool *
   3133 find_literal_pool (void)
   3134 {
   3135   literal_pool * pool;
   3136 
   3137   for (pool = list_of_pools; pool != NULL; pool = pool->next)
   3138     {
   3139       if (pool->section == now_seg
   3140 	  && pool->sub_section == now_subseg)
   3141 	break;
   3142     }
   3143 
   3144   return pool;
   3145 }
   3146 
   3147 static literal_pool *
   3148 find_or_make_literal_pool (void)
   3149 {
   3150   /* Next literal pool ID number.  */
   3151   static unsigned int latest_pool_num = 1;
   3152   literal_pool *      pool;
   3153 
   3154   pool = find_literal_pool ();
   3155 
   3156   if (pool == NULL)
   3157     {
   3158       /* Create a new pool.  */
   3159       pool = XNEW (literal_pool);
   3160       if (! pool)
   3161 	return NULL;
   3162 
   3163       pool->next_free_entry = 0;
   3164       pool->section	    = now_seg;
   3165       pool->sub_section	    = now_subseg;
   3166       pool->next	    = list_of_pools;
   3167       pool->symbol	    = NULL;
   3168       pool->alignment	    = 2;
   3169 
   3170       /* Add it to the list.  */
   3171       list_of_pools = pool;
   3172     }
   3173 
   3174   /* New pools, and emptied pools, will have a NULL symbol.  */
   3175   if (pool->symbol == NULL)
   3176     {
   3177       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
   3178 				    (valueT) 0, &zero_address_frag);
   3179       pool->id = latest_pool_num ++;
   3180     }
   3181 
   3182   /* Done.  */
   3183   return pool;
   3184 }
   3185 
   3186 /* Add the literal in the global 'inst'
   3187    structure to the relevant literal pool.  */
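         /* This is used, for example, when "ldr rX, =<constant>" cannot encode its
            constant as an immediate and must instead load it from a nearby literal
            pool.  */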
   3188 
   3189 static int
   3190 add_to_lit_pool (unsigned int nbytes)
   3191 {
   3192 #define PADDING_SLOT 0x1
   3193 #define LIT_ENTRY_SIZE_MASK 0xFF
   3194   literal_pool * pool;
   3195   unsigned int entry, pool_size = 0;
   3196   bfd_boolean padding_slot_p = FALSE;
   3197   unsigned imm1 = 0;
   3198   unsigned imm2 = 0;
   3199 
   3200   if (nbytes == 8)
   3201     {
   3202       imm1 = inst.operands[1].imm;
   3203       imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
   3204 	       : inst.reloc.exp.X_unsigned ? 0
   3205 	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
   3206       if (target_big_endian)
   3207 	{
   3208 	  imm1 = imm2;
   3209 	  imm2 = inst.operands[1].imm;
   3210 	}
   3211     }
   3212 
   3213   pool = find_or_make_literal_pool ();
   3214 
   3215   /* Check if this literal value is already in the pool.  */
   3216   for (entry = 0; entry < pool->next_free_entry; entry ++)
   3217     {
   3218       if (nbytes == 4)
   3219 	{
   3220 	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
   3221 	      && (inst.reloc.exp.X_op == O_constant)
   3222 	      && (pool->literals[entry].X_add_number
   3223 		  == inst.reloc.exp.X_add_number)
   3224 	      && (pool->literals[entry].X_md == nbytes)
   3225 	      && (pool->literals[entry].X_unsigned
   3226 		  == inst.reloc.exp.X_unsigned))
   3227 	    break;
   3228 
   3229 	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
   3230 	      && (inst.reloc.exp.X_op == O_symbol)
   3231 	      && (pool->literals[entry].X_add_number
   3232 		  == inst.reloc.exp.X_add_number)
   3233 	      && (pool->literals[entry].X_add_symbol
   3234 		  == inst.reloc.exp.X_add_symbol)
   3235 	      && (pool->literals[entry].X_op_symbol
   3236 		  == inst.reloc.exp.X_op_symbol)
   3237 	      && (pool->literals[entry].X_md == nbytes))
   3238 	    break;
   3239 	}
   3240       else if ((nbytes == 8)
   3241 	       && !(pool_size & 0x7)
   3242 	       && ((entry + 1) != pool->next_free_entry)
   3243 	       && (pool->literals[entry].X_op == O_constant)
   3244 	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
   3245 	       && (pool->literals[entry].X_unsigned
   3246 		   == inst.reloc.exp.X_unsigned)
   3247 	       && (pool->literals[entry + 1].X_op == O_constant)
   3248 	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
   3249 	       && (pool->literals[entry + 1].X_unsigned
   3250 		   == inst.reloc.exp.X_unsigned))
   3251 	break;
   3252 
   3253       padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
   3254       if (padding_slot_p && (nbytes == 4))
   3255 	break;
   3256 
   3257       pool_size += 4;
   3258     }
   3259 
   3260   /* Do we need to create a new entry?	*/
   3261   if (entry == pool->next_free_entry)
   3262     {
   3263       if (entry >= MAX_LITERAL_POOL_SIZE)
   3264 	{
   3265 	  inst.error = _("literal pool overflow");
   3266 	  return FAIL;
   3267 	}
   3268 
   3269       if (nbytes == 8)
   3270 	{
    3271 	  /* For 8-byte entries, we align to an 8-byte boundary
    3272 	     and split them into two 4-byte entries, because on a
    3273 	     32-bit host 8-byte constants are treated as bignums and
    3274 	     thus saved in "generic_bignum", which will be overwritten
    3275 	     by later assignments.
   3276 
   3277 	     We also need to make sure there is enough space for
   3278 	     the split.
   3279 
   3280 	     We also check to make sure the literal operand is a
   3281 	     constant number.  */
   3282 	  if (!(inst.reloc.exp.X_op == O_constant
   3283 	        || inst.reloc.exp.X_op == O_big))
   3284 	    {
   3285 	      inst.error = _("invalid type for literal pool");
   3286 	      return FAIL;
   3287 	    }
   3288 	  else if (pool_size & 0x7)
   3289 	    {
   3290 	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
   3291 		{
   3292 		  inst.error = _("literal pool overflow");
   3293 		  return FAIL;
   3294 		}
   3295 
   3296 	      pool->literals[entry] = inst.reloc.exp;
   3297 	      pool->literals[entry].X_op = O_constant;
   3298 	      pool->literals[entry].X_add_number = 0;
   3299 	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
   3300 	      pool->next_free_entry += 1;
   3301 	      pool_size += 4;
   3302 	    }
   3303 	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
   3304 	    {
   3305 	      inst.error = _("literal pool overflow");
   3306 	      return FAIL;
   3307 	    }
   3308 
   3309 	  pool->literals[entry] = inst.reloc.exp;
   3310 	  pool->literals[entry].X_op = O_constant;
   3311 	  pool->literals[entry].X_add_number = imm1;
   3312 	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
   3313 	  pool->literals[entry++].X_md = 4;
   3314 	  pool->literals[entry] = inst.reloc.exp;
   3315 	  pool->literals[entry].X_op = O_constant;
   3316 	  pool->literals[entry].X_add_number = imm2;
   3317 	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
   3318 	  pool->literals[entry].X_md = 4;
   3319 	  pool->alignment = 3;
   3320 	  pool->next_free_entry += 1;
   3321 	}
   3322       else
   3323 	{
   3324 	  pool->literals[entry] = inst.reloc.exp;
   3325 	  pool->literals[entry].X_md = 4;
   3326 	}
   3327 
   3328 #ifdef OBJ_ELF
   3329       /* PR ld/12974: Record the location of the first source line to reference
   3330 	 this entry in the literal pool.  If it turns out during linking that the
   3331 	 symbol does not exist we will be able to give an accurate line number for
   3332 	 the (first use of the) missing reference.  */
   3333       if (debug_type == DEBUG_DWARF2)
   3334 	dwarf2_where (pool->locs + entry);
   3335 #endif
   3336       pool->next_free_entry += 1;
   3337     }
   3338   else if (padding_slot_p)
   3339     {
   3340       pool->literals[entry] = inst.reloc.exp;
   3341       pool->literals[entry].X_md = nbytes;
   3342     }
   3343 
   3344   inst.reloc.exp.X_op	      = O_symbol;
   3345   inst.reloc.exp.X_add_number = pool_size;
   3346   inst.reloc.exp.X_add_symbol = pool->symbol;
   3347 
   3348   return SUCCESS;
   3349 }
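/* Illustrative note (not from the original sources): on success the
   caller's inst.reloc.exp refers to pool->symbol plus the byte offset
   of the entry that was found or created.  Assuming input such as

       ldr	r0, =0x12345678
       ldr	r1, =0x12345678

   before the next .pool/.ltorg, the duplicate-detection loop above lets
   both loads share a single 4-byte slot, while an 8-byte literal is
   split into two consecutive 4-byte slots at an 8-byte aligned offset.  */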
   3350 
   3351 bfd_boolean
   3352 tc_start_label_without_colon (void)
   3353 {
   3354   bfd_boolean ret = TRUE;
   3355 
   3356   if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
   3357     {
   3358       const char *label = input_line_pointer;
   3359 
   3360       while (!is_end_of_line[(int) label[-1]])
   3361 	--label;
   3362 
   3363       if (*label == '.')
   3364 	{
   3365 	  as_bad (_("Invalid label '%s'"), label);
   3366 	  ret = FALSE;
   3367 	}
   3368 
   3369       asmfunc_debug (label);
   3370 
   3371       asmfunc_state = WAITING_ENDASMFUNC;
   3372     }
   3373 
   3374   return ret;
   3375 }
   3376 
   3377 /* Can't use symbol_new here, so have to create a symbol and then at
   3378    a later date assign it a value.  That's what these functions do.  */
   3379 
   3380 static void
   3381 symbol_locate (symbolS *    symbolP,
   3382 	       const char * name,	/* It is copied, the caller can modify.	 */
   3383 	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
   3384 	       valueT	    valu,	/* Symbol value.  */
   3385 	       fragS *	    frag)	/* Associated fragment.	 */
   3386 {
   3387   size_t name_length;
   3388   char * preserved_copy_of_name;
   3389 
   3390   name_length = strlen (name) + 1;   /* +1 for \0.  */
   3391   obstack_grow (&notes, name, name_length);
   3392   preserved_copy_of_name = (char *) obstack_finish (&notes);
   3393 
   3394 #ifdef tc_canonicalize_symbol_name
   3395   preserved_copy_of_name =
   3396     tc_canonicalize_symbol_name (preserved_copy_of_name);
   3397 #endif
   3398 
   3399   S_SET_NAME (symbolP, preserved_copy_of_name);
   3400 
   3401   S_SET_SEGMENT (symbolP, segment);
   3402   S_SET_VALUE (symbolP, valu);
   3403   symbol_clear_list_pointers (symbolP);
   3404 
   3405   symbol_set_frag (symbolP, frag);
   3406 
   3407   /* Link to end of symbol chain.  */
   3408   {
   3409     extern int symbol_table_frozen;
   3410 
   3411     if (symbol_table_frozen)
   3412       abort ();
   3413   }
   3414 
   3415   symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
   3416 
   3417   obj_symbol_new_hook (symbolP);
   3418 
   3419 #ifdef tc_symbol_new_hook
   3420   tc_symbol_new_hook (symbolP);
   3421 #endif
   3422 
   3423 #ifdef DEBUG_SYMS
   3424   verify_symbol_chain (symbol_rootP, symbol_lastP);
   3425 #endif /* DEBUG_SYMS  */
   3426 }
   3427 
   3428 static void
   3429 s_ltorg (int ignored ATTRIBUTE_UNUSED)
   3430 {
   3431   unsigned int entry;
   3432   literal_pool * pool;
   3433   char sym_name[20];
   3434 
   3435   pool = find_literal_pool ();
   3436   if (pool == NULL
   3437       || pool->symbol == NULL
   3438       || pool->next_free_entry == 0)
   3439     return;
   3440 
   3441   /* Align the pool as required for its word (or doubleword) accesses.
   3442      Only make a frag if we have to.  */
   3443   if (!need_pass_2)
   3444     frag_align (pool->alignment, 0, 0);
   3445 
   3446   record_alignment (now_seg, 2);
   3447 
   3448 #ifdef OBJ_ELF
   3449   seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
   3450   make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
   3451 #endif
   3452   sprintf (sym_name, "$$lit_\002%x", pool->id);
   3453 
   3454   symbol_locate (pool->symbol, sym_name, now_seg,
   3455 		 (valueT) frag_now_fix (), frag_now);
   3456   symbol_table_insert (pool->symbol);
   3457 
   3458   ARM_SET_THUMB (pool->symbol, thumb_mode);
   3459 
   3460 #if defined OBJ_COFF || defined OBJ_ELF
   3461   ARM_SET_INTERWORK (pool->symbol, support_interwork);
   3462 #endif
   3463 
   3464   for (entry = 0; entry < pool->next_free_entry; entry ++)
   3465     {
   3466 #ifdef OBJ_ELF
   3467       if (debug_type == DEBUG_DWARF2)
   3468 	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
   3469 #endif
   3470       /* First output the expression in the instruction to the pool.  */
   3471       emit_expr (&(pool->literals[entry]),
   3472 		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
   3473     }
   3474 
   3475   /* Mark the pool as empty.  */
   3476   pool->next_free_entry = 0;
   3477   pool->symbol = NULL;
   3478 }
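/* Usage sketch (illustrative, not taken from this file): .ltorg, or its
   alias .pool, is normally placed just off the execution path so that
   literal loads stay in range, e.g.

       ldr	r0, =0x12345678
       bx	lr
       .ltorg			@ pool contents are emitted here

   The $$lit_ symbol located above is what the literal-load fixups
   created by add_to_lit_pool finally resolve against.  */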
   3479 
   3480 #ifdef OBJ_ELF
   3481 /* Forward declarations for functions below, in the MD interface
   3482    section.  */
   3483 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
   3484 static valueT create_unwind_entry (int);
   3485 static void start_unwind_section (const segT, int);
   3486 static void add_unwind_opcode (valueT, int);
   3487 static void flush_pending_unwind (void);
   3488 
   3489 /* Directives: Data.  */
   3490 
   3491 static void
   3492 s_arm_elf_cons (int nbytes)
   3493 {
   3494   expressionS exp;
   3495 
   3496 #ifdef md_flush_pending_output
   3497   md_flush_pending_output ();
   3498 #endif
   3499 
   3500   if (is_it_end_of_statement ())
   3501     {
   3502       demand_empty_rest_of_line ();
   3503       return;
   3504     }
   3505 
   3506 #ifdef md_cons_align
   3507   md_cons_align (nbytes);
   3508 #endif
   3509 
   3510   mapping_state (MAP_DATA);
   3511   do
   3512     {
   3513       int reloc;
   3514       char *base = input_line_pointer;
   3515 
   3516       expression (& exp);
   3517 
   3518       if (exp.X_op != O_symbol)
   3519 	emit_expr (&exp, (unsigned int) nbytes);
   3520       else
   3521 	{
   3522 	  char *before_reloc = input_line_pointer;
   3523 	  reloc = parse_reloc (&input_line_pointer);
   3524 	  if (reloc == -1)
   3525 	    {
   3526 	      as_bad (_("unrecognized relocation suffix"));
   3527 	      ignore_rest_of_line ();
   3528 	      return;
   3529 	    }
   3530 	  else if (reloc == BFD_RELOC_UNUSED)
   3531 	    emit_expr (&exp, (unsigned int) nbytes);
   3532 	  else
   3533 	    {
   3534 	      reloc_howto_type *howto = (reloc_howto_type *)
   3535 		  bfd_reloc_type_lookup (stdoutput,
   3536 					 (bfd_reloc_code_real_type) reloc);
   3537 	      int size = bfd_get_reloc_size (howto);
   3538 
   3539 	      if (reloc == BFD_RELOC_ARM_PLT32)
   3540 		{
   3541 		  as_bad (_("(plt) is only valid on branch targets"));
   3542 		  reloc = BFD_RELOC_UNUSED;
   3543 		  size = 0;
   3544 		}
   3545 
   3546 	      if (size > nbytes)
   3547 		as_bad (_("%s relocations do not fit in %d bytes"),
   3548 			howto->name, nbytes);
   3549 	      else
   3550 		{
   3551 		  /* We've parsed an expression stopping at O_symbol.
   3552 		     But there may be more expression left now that we
   3553 		     have parsed the relocation marker.  Parse it again.
   3554 		     XXX Surely there is a cleaner way to do this.  */
   3555 		  char *p = input_line_pointer;
   3556 		  int offset;
   3557 		  char *save_buf = XNEWVEC (char, input_line_pointer - base);
   3558 
   3559 		  memcpy (save_buf, base, input_line_pointer - base);
   3560 		  memmove (base + (input_line_pointer - before_reloc),
   3561 			   base, before_reloc - base);
   3562 
   3563 		  input_line_pointer = base + (input_line_pointer-before_reloc);
   3564 		  expression (&exp);
   3565 		  memcpy (base, save_buf, p - base);
   3566 
   3567 		  offset = nbytes - size;
   3568 		  p = frag_more (nbytes);
   3569 		  memset (p, 0, nbytes);
   3570 		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
   3571 			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
   3572 		  free (save_buf);
   3573 		}
   3574 	    }
   3575 	}
   3576     }
   3577   while (*input_line_pointer++ == ',');
   3578 
   3579   /* Put terminator back into stream.  */
   3580   input_line_pointer --;
   3581   demand_empty_rest_of_line ();
   3582 }
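/* Illustrative usage (an assumption about typical input, not taken from
   this file): the relocation suffixes parsed above allow data
   directives such as

       .word	sym(GOTOFF)
       .word	sym(TARGET1)

   whereas ".word sym(PLT)" is rejected by the check above, since (plt)
   is only meaningful on branch targets.  */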
   3583 
   3584 /* Emit an expression containing a 32-bit thumb instruction.
   3585    Implementation based on put_thumb32_insn.  */
   3586 
   3587 static void
   3588 emit_thumb32_expr (expressionS * exp)
   3589 {
   3590   expressionS exp_high = *exp;
   3591 
   3592   exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
   3593   emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
   3594   exp->X_add_number &= 0xffff;
   3595   emit_expr (exp, (unsigned int) THUMB_SIZE);
   3596 }
   3597 
   3598 /*  Guess the instruction size based on the opcode.  */
   3599 
   3600 static int
   3601 thumb_insn_size (int opcode)
   3602 {
   3603   if ((unsigned int) opcode < 0xe800u)
   3604     return 2;
   3605   else if ((unsigned int) opcode >= 0xe8000000u)
   3606     return 4;
   3607   else
   3608     return 0;
   3609 }
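/* Worked examples for the heuristic above (illustrative only):
   0x4770 (bx lr) is below 0xe800 and is treated as a 16-bit insn;
   0xf3af8000 (nop.w) is at least 0xe8000000 and is treated as 32-bit;
   a value such as 0xe800 matches neither test, so 0 is returned and the
   user has to write .inst.n or .inst.w explicitly.  */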
   3610 
   3611 static bfd_boolean
   3612 emit_insn (expressionS *exp, int nbytes)
   3613 {
   3614   int size = 0;
   3615 
   3616   if (exp->X_op == O_constant)
   3617     {
   3618       size = nbytes;
   3619 
   3620       if (size == 0)
   3621 	size = thumb_insn_size (exp->X_add_number);
   3622 
   3623       if (size != 0)
   3624 	{
   3625 	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
   3626 	    {
   3627 	      as_bad (_(".inst.n operand too big. "\
   3628 			"Use .inst.w instead"));
   3629 	      size = 0;
   3630 	    }
   3631 	  else
   3632 	    {
   3633 	      if (now_it.state == AUTOMATIC_IT_BLOCK)
   3634 		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
   3635 	      else
   3636 		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
   3637 
   3638 	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
   3639 		emit_thumb32_expr (exp);
   3640 	      else
   3641 		emit_expr (exp, (unsigned int) size);
   3642 
   3643 	      it_fsm_post_encode ();
   3644 	    }
   3645 	}
   3646       else
   3647 	as_bad (_("cannot determine Thumb instruction size. "	\
   3648 		  "Use .inst.n/.inst.w instead"));
   3649     }
   3650   else
   3651     as_bad (_("constant expression required"));
   3652 
   3653   return (size != 0);
   3654 }
   3655 
   3656 /* Like s_arm_elf_cons but do not use md_cons_align and
   3657    set the mapping state to MAP_ARM/MAP_THUMB.  */
   3658 
   3659 static void
   3660 s_arm_elf_inst (int nbytes)
   3661 {
   3662   if (is_it_end_of_statement ())
   3663     {
   3664       demand_empty_rest_of_line ();
   3665       return;
   3666     }
   3667 
   3668   /* Calling mapping_state () here will not change ARM/THUMB,
   3669      but will ensure that we are not left in the DATA state.  */
   3670 
   3671   if (thumb_mode)
   3672     mapping_state (MAP_THUMB);
   3673   else
   3674     {
   3675       if (nbytes != 0)
   3676 	{
   3677 	  as_bad (_("width suffixes are invalid in ARM mode"));
   3678 	  ignore_rest_of_line ();
   3679 	  return;
   3680 	}
   3681 
   3682       nbytes = 4;
   3683 
   3684       mapping_state (MAP_ARM);
   3685     }
   3686 
   3687   do
   3688     {
   3689       expressionS exp;
   3690 
   3691       expression (& exp);
   3692 
   3693       if (! emit_insn (& exp, nbytes))
   3694 	{
   3695 	  ignore_rest_of_line ();
   3696 	  return;
   3697 	}
   3698     }
   3699   while (*input_line_pointer++ == ',');
   3700 
   3701   /* Put terminator back into stream.  */
   3702   input_line_pointer --;
   3703   demand_empty_rest_of_line ();
   3704 }
   3705 
   3706 /* Parse a .rel31 directive.  */
   3707 
   3708 static void
   3709 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
   3710 {
   3711   expressionS exp;
   3712   char *p;
   3713   valueT highbit;
   3714 
   3715   highbit = 0;
   3716   if (*input_line_pointer == '1')
   3717     highbit = 0x80000000;
   3718   else if (*input_line_pointer != '0')
   3719     as_bad (_("expected 0 or 1"));
   3720 
   3721   input_line_pointer++;
   3722   if (*input_line_pointer != ',')
   3723     as_bad (_("missing comma"));
   3724   input_line_pointer++;
   3725 
   3726 #ifdef md_flush_pending_output
   3727   md_flush_pending_output ();
   3728 #endif
   3729 
   3730 #ifdef md_cons_align
   3731   md_cons_align (4);
   3732 #endif
   3733 
   3734   mapping_state (MAP_DATA);
   3735 
   3736   expression (&exp);
   3737 
   3738   p = frag_more (4);
   3739   md_number_to_chars (p, highbit, 4);
   3740   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
   3741 	       BFD_RELOC_ARM_PREL31);
   3742 
   3743   demand_empty_rest_of_line ();
   3744 }
   3745 
   3746 /* Directives: AEABI stack-unwind tables.  */
   3747 
   3748 /* Parse an unwind_fnstart directive.  Simply records the current location.  */
   3749 
   3750 static void
   3751 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
   3752 {
   3753   demand_empty_rest_of_line ();
   3754   if (unwind.proc_start)
   3755     {
   3756       as_bad (_("duplicate .fnstart directive"));
   3757       return;
   3758     }
   3759 
   3760   /* Mark the start of the function.  */
   3761   unwind.proc_start = expr_build_dot ();
   3762 
   3763   /* Reset the rest of the unwind info.	 */
   3764   unwind.opcode_count = 0;
   3765   unwind.table_entry = NULL;
   3766   unwind.personality_routine = NULL;
   3767   unwind.personality_index = -1;
   3768   unwind.frame_size = 0;
   3769   unwind.fp_offset = 0;
   3770   unwind.fp_reg = REG_SP;
   3771   unwind.fp_used = 0;
   3772   unwind.sp_restored = 0;
   3773 }
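/* Typical directive sequence (an illustrative sketch of expected input,
   not taken from this file):

       func:
       	.fnstart
       	push	{r4, r5, lr}
       	.save	{r4, r5, lr}
       	...
       	pop	{r4, r5, pc}
       	.fnend

   .fnstart itself only records the start address and resets the state
   above; the unwind opcodes and table entries are produced by the
   directives that follow and, finally, by .fnend.  */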
   3774 
   3775 
   3776 /* Parse a handlerdata directive.  Creates the exception handling table entry
   3777    for the function.  */
   3778 
   3779 static void
   3780 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
   3781 {
   3782   demand_empty_rest_of_line ();
   3783   if (!unwind.proc_start)
   3784     as_bad (MISSING_FNSTART);
   3785 
   3786   if (unwind.table_entry)
   3787     as_bad (_("duplicate .handlerdata directive"));
   3788 
   3789   create_unwind_entry (1);
   3790 }
   3791 
   3792 /* Parse an unwind_fnend directive.  Generates the index table entry.  */
   3793 
   3794 static void
   3795 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
   3796 {
   3797   long where;
   3798   char *ptr;
   3799   valueT val;
   3800   unsigned int marked_pr_dependency;
   3801 
   3802   demand_empty_rest_of_line ();
   3803 
   3804   if (!unwind.proc_start)
   3805     {
   3806       as_bad (_(".fnend directive without .fnstart"));
   3807       return;
   3808     }
   3809 
   3810   /* Add eh table entry.  */
   3811   if (unwind.table_entry == NULL)
   3812     val = create_unwind_entry (0);
   3813   else
   3814     val = 0;
   3815 
   3816   /* Add index table entry.  This is two words.	 */
   3817   start_unwind_section (unwind.saved_seg, 1);
   3818   frag_align (2, 0, 0);
   3819   record_alignment (now_seg, 2);
   3820 
   3821   ptr = frag_more (8);
   3822   memset (ptr, 0, 8);
   3823   where = frag_now_fix () - 8;
   3824 
   3825   /* Self relative offset of the function start.  */
   3826   fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
   3827 	   BFD_RELOC_ARM_PREL31);
   3828 
   3829   /* Indicate dependency on EHABI-defined personality routines to the
   3830      linker, if it hasn't been done already.  */
   3831   marked_pr_dependency
   3832     = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
   3833   if (unwind.personality_index >= 0 && unwind.personality_index < 3
   3834       && !(marked_pr_dependency & (1 << unwind.personality_index)))
   3835     {
   3836       static const char *const name[] =
   3837 	{
   3838 	  "__aeabi_unwind_cpp_pr0",
   3839 	  "__aeabi_unwind_cpp_pr1",
   3840 	  "__aeabi_unwind_cpp_pr2"
   3841 	};
   3842       symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
   3843       fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
   3844       seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
   3845 	|= 1 << unwind.personality_index;
   3846     }
   3847 
   3848   if (val)
   3849     /* Inline exception table entry.  */
   3850     md_number_to_chars (ptr + 4, val, 4);
   3851   else
   3852     /* Self relative offset of the table entry.	 */
   3853     fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
   3854 	     BFD_RELOC_ARM_PREL31);
   3855 
   3856   /* Restore the original section.  */
   3857   subseg_set (unwind.saved_seg, unwind.saved_subseg);
   3858 
   3859   unwind.proc_start = NULL;
   3860 }
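/* Layout note (a summary of the code above plus the usual EHABI
   convention): each index table entry written here is two words.
   Word 0 is a PREL31 offset to the function start; word 1 is either the
   inline (compact) entry returned by create_unwind_entry, the value 1
   (EXIDX_CANTUNWIND) for .cantunwind frames, or a PREL31 reference to
   the table entry emitted in the .ARM.extab section.  */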
   3861 
   3862 
   3863 /* Parse an unwind_cantunwind directive.  */
   3864 
   3865 static void
   3866 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
   3867 {
   3868   demand_empty_rest_of_line ();
   3869   if (!unwind.proc_start)
   3870     as_bad (MISSING_FNSTART);
   3871 
   3872   if (unwind.personality_routine || unwind.personality_index != -1)
   3873     as_bad (_("personality routine specified for cantunwind frame"));
   3874 
   3875   unwind.personality_index = -2;
   3876 }
   3877 
   3878 
   3879 /* Parse a personalityindex directive.	*/
   3880 
   3881 static void
   3882 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
   3883 {
   3884   expressionS exp;
   3885 
   3886   if (!unwind.proc_start)
   3887     as_bad (MISSING_FNSTART);
   3888 
   3889   if (unwind.personality_routine || unwind.personality_index != -1)
   3890     as_bad (_("duplicate .personalityindex directive"));
   3891 
   3892   expression (&exp);
   3893 
   3894   if (exp.X_op != O_constant
   3895       || exp.X_add_number < 0 || exp.X_add_number > 15)
   3896     {
   3897       as_bad (_("bad personality routine number"));
   3898       ignore_rest_of_line ();
   3899       return;
   3900     }
   3901 
   3902   unwind.personality_index = exp.X_add_number;
   3903 
   3904   demand_empty_rest_of_line ();
   3905 }
   3906 
   3907 
   3908 /* Parse a personality directive.  */
   3909 
   3910 static void
   3911 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
   3912 {
   3913   char *name, *p, c;
   3914 
   3915   if (!unwind.proc_start)
   3916     as_bad (MISSING_FNSTART);
   3917 
   3918   if (unwind.personality_routine || unwind.personality_index != -1)
   3919     as_bad (_("duplicate .personality directive"));
   3920 
   3921   c = get_symbol_name (& name);
   3922   p = input_line_pointer;
   3923   if (c == '"')
   3924     ++ input_line_pointer;
   3925   unwind.personality_routine = symbol_find_or_make (name);
   3926   *p = c;
   3927   demand_empty_rest_of_line ();
   3928 }
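/* Usage sketch (illustrative):

       .personalityindex 0		@ use __aeabi_unwind_cpp_pr0
   or
       .personality __gxx_personality_v0

   Only one of the two may be given per .fnstart/.fnend region, which is
   what the duplicate checks above enforce.  */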
   3929 
   3930 
   3931 /* Parse a directive saving core registers.  */
   3932 
   3933 static void
   3934 s_arm_unwind_save_core (void)
   3935 {
   3936   valueT op;
   3937   long range;
   3938   int n;
   3939 
   3940   range = parse_reg_list (&input_line_pointer);
   3941   if (range == FAIL)
   3942     {
   3943       as_bad (_("expected register list"));
   3944       ignore_rest_of_line ();
   3945       return;
   3946     }
   3947 
   3948   demand_empty_rest_of_line ();
   3949 
   3950   /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
   3951      into .unwind_save {..., sp...}.  We aren't bothered about the value of
   3952      ip because it is clobbered by calls.  */
   3953   if (unwind.sp_restored && unwind.fp_reg == 12
   3954       && (range & 0x3000) == 0x1000)
   3955     {
   3956       unwind.opcode_count--;
   3957       unwind.sp_restored = 0;
   3958       range = (range | 0x2000) & ~0x1000;
   3959       unwind.pending_offset = 0;
   3960     }
   3961 
   3962   /* Pop r4-r15.  */
   3963   if (range & 0xfff0)
   3964     {
   3965       /* See if we can use the short opcodes.  These pop a block of up to 8
   3966 	 registers starting with r4, plus maybe r14.  */
   3967       for (n = 0; n < 8; n++)
   3968 	{
   3969 	  /* Break at the first non-saved register.	 */
   3970 	  if ((range & (1 << (n + 4))) == 0)
   3971 	    break;
   3972 	}
   3973       /* See if there are any other bits set.  */
   3974       if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
   3975 	{
   3976 	  /* Use the long form.  */
   3977 	  op = 0x8000 | ((range >> 4) & 0xfff);
   3978 	  add_unwind_opcode (op, 2);
   3979 	}
   3980       else
   3981 	{
   3982 	  /* Use the short form.  */
   3983 	  if (range & 0x4000)
   3984 	    op = 0xa8; /* Pop r14.	*/
   3985 	  else
   3986 	    op = 0xa0; /* Do not pop r14.  */
   3987 	  op |= (n - 1);
   3988 	  add_unwind_opcode (op, 1);
   3989 	}
   3990     }
   3991 
   3992   /* Pop r0-r3.	 */
   3993   if (range & 0xf)
   3994     {
   3995       op = 0xb100 | (range & 0xf);
   3996       add_unwind_opcode (op, 2);
   3997     }
   3998 
   3999   /* Record the number of bytes pushed.	 */
   4000   for (n = 0; n < 16; n++)
   4001     {
   4002       if (range & (1 << n))
   4003 	unwind.frame_size += 4;
   4004     }
   4005 }
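/* Worked encodings for the code above (illustrative, assuming the
   standard EHABI byte values):

       .save	{r4-r7, lr}	-> 0xAB        (pop r4-r7, r14),  20 bytes
       .save	{r4-r11, lr}	-> 0xAF        (pop r4-r11, r14), 36 bytes
       .save	{r0-r3}		-> 0xB1 0x0F   (pop r0-r3),       16 bytes

   where the byte counts are what is added to unwind.frame_size.  */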
   4006 
   4007 
   4008 /* Parse a directive saving FPA registers.  */
   4009 
   4010 static void
   4011 s_arm_unwind_save_fpa (int reg)
   4012 {
   4013   expressionS exp;
   4014   int num_regs;
   4015   valueT op;
   4016 
   4017   /* Get the number of registers to transfer.  */
   4018   if (skip_past_comma (&input_line_pointer) != FAIL)
   4019     expression (&exp);
   4020   else
   4021     exp.X_op = O_illegal;
   4022 
   4023   if (exp.X_op != O_constant)
   4024     {
   4025       as_bad (_("expected , <constant>"));
   4026       ignore_rest_of_line ();
   4027       return;
   4028     }
   4029 
   4030   num_regs = exp.X_add_number;
   4031 
   4032   if (num_regs < 1 || num_regs > 4)
   4033     {
   4034       as_bad (_("number of registers must be in the range [1:4]"));
   4035       ignore_rest_of_line ();
   4036       return;
   4037     }
   4038 
   4039   demand_empty_rest_of_line ();
   4040 
   4041   if (reg == 4)
   4042     {
   4043       /* Short form.  */
   4044       op = 0xb4 | (num_regs - 1);
   4045       add_unwind_opcode (op, 1);
   4046     }
   4047   else
   4048     {
   4049       /* Long form.  */
   4050       op = 0xc800 | (reg << 4) | (num_regs - 1);
   4051       add_unwind_opcode (op, 2);
   4052     }
   4053   unwind.frame_size += num_regs * 12;
   4054 }
   4055 
   4056 
   4057 /* Parse a directive saving VFP registers for ARMv6 and above.  */
   4058 
   4059 static void
   4060 s_arm_unwind_save_vfp_armv6 (void)
   4061 {
   4062   int count;
   4063   unsigned int start;
   4064   valueT op;
   4065   int num_vfpv3_regs = 0;
   4066   int num_regs_below_16;
   4067 
   4068   count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
   4069   if (count == FAIL)
   4070     {
   4071       as_bad (_("expected register list"));
   4072       ignore_rest_of_line ();
   4073       return;
   4074     }
   4075 
   4076   demand_empty_rest_of_line ();
   4077 
   4078   /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
   4079      than FSTMX/FLDMX-style ones).  */
   4080 
   4081   /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
   4082   if (start >= 16)
   4083     num_vfpv3_regs = count;
   4084   else if (start + count > 16)
   4085     num_vfpv3_regs = start + count - 16;
   4086 
   4087   if (num_vfpv3_regs > 0)
   4088     {
   4089       int start_offset = start > 16 ? start - 16 : 0;
   4090       op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
   4091       add_unwind_opcode (op, 2);
   4092     }
   4093 
   4094   /* Generate opcode for registers numbered in the range 0 .. 15.  */
   4095   num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
   4096   gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
   4097   if (num_regs_below_16 > 0)
   4098     {
   4099       op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
   4100       add_unwind_opcode (op, 2);
   4101     }
   4102 
   4103   unwind.frame_size += count * 8;
   4104 }
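/* Worked example for the ARMv6+ form above (illustrative):
   ".vsave {d8-d11}" gives start = 8, count = 4 and no VFPv3 registers,
   so the single opcode 0xC9 0x83 (FSTMFDD d8-d11) is emitted and 32
   bytes are added to the frame size.  A list crossing d16, e.g.
   {d14-d17}, is split into a 0xC8xx opcode for d16-d17 and a 0xC9xx
   opcode for d14-d15.  */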
   4105 
   4106 
   4107 /* Parse a directive saving VFP registers for pre-ARMv6.  */
   4108 
   4109 static void
   4110 s_arm_unwind_save_vfp (void)
   4111 {
   4112   int count;
   4113   unsigned int reg;
   4114   valueT op;
   4115 
   4116   count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
   4117   if (count == FAIL)
   4118     {
   4119       as_bad (_("expected register list"));
   4120       ignore_rest_of_line ();
   4121       return;
   4122     }
   4123 
   4124   demand_empty_rest_of_line ();
   4125 
   4126   if (reg == 8)
   4127     {
   4128       /* Short form.  */
   4129       op = 0xb8 | (count - 1);
   4130       add_unwind_opcode (op, 1);
   4131     }
   4132   else
   4133     {
   4134       /* Long form.  */
   4135       op = 0xb300 | (reg << 4) | (count - 1);
   4136       add_unwind_opcode (op, 2);
   4137     }
   4138   unwind.frame_size += count * 8 + 4;
   4139 }
   4140 
   4141 
   4142 /* Parse a directive saving iWMMXt data registers.  */
   4143 
   4144 static void
   4145 s_arm_unwind_save_mmxwr (void)
   4146 {
   4147   int reg;
   4148   int hi_reg;
   4149   int i;
   4150   unsigned mask = 0;
   4151   valueT op;
   4152 
   4153   if (*input_line_pointer == '{')
   4154     input_line_pointer++;
   4155 
   4156   do
   4157     {
   4158       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
   4159 
   4160       if (reg == FAIL)
   4161 	{
   4162 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
   4163 	  goto error;
   4164 	}
   4165 
   4166       if (mask >> reg)
   4167 	as_tsktsk (_("register list not in ascending order"));
   4168       mask |= 1 << reg;
   4169 
   4170       if (*input_line_pointer == '-')
   4171 	{
   4172 	  input_line_pointer++;
   4173 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
   4174 	  if (hi_reg == FAIL)
   4175 	    {
   4176 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
   4177 	      goto error;
   4178 	    }
   4179 	  else if (reg >= hi_reg)
   4180 	    {
   4181 	      as_bad (_("bad register range"));
   4182 	      goto error;
   4183 	    }
   4184 	  for (; reg < hi_reg; reg++)
   4185 	    mask |= 1 << reg;
   4186 	}
   4187     }
   4188   while (skip_past_comma (&input_line_pointer) != FAIL);
   4189 
   4190   skip_past_char (&input_line_pointer, '}');
   4191 
   4192   demand_empty_rest_of_line ();
   4193 
   4194   /* Generate any deferred opcodes because we're going to be looking at
   4195      the list.	*/
   4196   flush_pending_unwind ();
   4197 
   4198   for (i = 0; i < 16; i++)
   4199     {
   4200       if (mask & (1 << i))
   4201 	unwind.frame_size += 8;
   4202     }
   4203 
   4204   /* Attempt to combine with a previous opcode.	 We do this because gcc
   4205      likes to output separate unwind directives for a single block of
   4206      registers.	 */
   4207   if (unwind.opcode_count > 0)
   4208     {
   4209       i = unwind.opcodes[unwind.opcode_count - 1];
   4210       if ((i & 0xf8) == 0xc0)
   4211 	{
   4212 	  i &= 7;
   4213 	  /* Only merge if the blocks are contiguous.  */
   4214 	  if (i < 6)
   4215 	    {
   4216 	      if ((mask & 0xfe00) == (1 << 9))
   4217 		{
   4218 		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
   4219 		  unwind.opcode_count--;
   4220 		}
   4221 	    }
   4222 	  else if (i == 6 && unwind.opcode_count >= 2)
   4223 	    {
   4224 	      i = unwind.opcodes[unwind.opcode_count - 2];
   4225 	      reg = i >> 4;
   4226 	      i &= 0xf;
   4227 
   4228 	      op = 0xffff << (reg - 1);
   4229 	      if (reg > 0
   4230 		  && ((mask & op) == (1u << (reg - 1))))
   4231 		{
   4232 		  op = (1 << (reg + i + 1)) - 1;
   4233 		  op &= ~((1 << reg) - 1);
   4234 		  mask |= op;
   4235 		  unwind.opcode_count -= 2;
   4236 		}
   4237 	    }
   4238 	}
   4239     }
   4240 
   4241   hi_reg = 15;
   4242   /* We want to generate opcodes in the order the registers have been
   4243      saved, i.e. descending order.  */
   4244   for (reg = 15; reg >= -1; reg--)
   4245     {
   4246       /* Save registers in blocks.  */
   4247       if (reg < 0
   4248 	  || !(mask & (1 << reg)))
   4249 	{
   4250 	  /* We found an unsaved reg.  Generate opcodes to save the
   4251 	     preceding block.	*/
   4252 	  if (reg != hi_reg)
   4253 	    {
   4254 	      if (reg == 9)
   4255 		{
   4256 		  /* Short form.  */
   4257 		  op = 0xc0 | (hi_reg - 10);
   4258 		  add_unwind_opcode (op, 1);
   4259 		}
   4260 	      else
   4261 		{
   4262 		  /* Long form.	 */
   4263 		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
   4264 		  add_unwind_opcode (op, 2);
   4265 		}
   4266 	    }
   4267 	  hi_reg = reg - 1;
   4268 	}
   4269     }
   4270 
   4271   return;
   4272 error:
   4273   ignore_rest_of_line ();
   4274 }
   4275 
   4276 static void
   4277 s_arm_unwind_save_mmxwcg (void)
   4278 {
   4279   int reg;
   4280   int hi_reg;
   4281   unsigned mask = 0;
   4282   valueT op;
   4283 
   4284   if (*input_line_pointer == '{')
   4285     input_line_pointer++;
   4286 
   4287   skip_whitespace (input_line_pointer);
   4288 
   4289   do
   4290     {
   4291       reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
   4292 
   4293       if (reg == FAIL)
   4294 	{
   4295 	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
   4296 	  goto error;
   4297 	}
   4298 
   4299       reg -= 8;
   4300       if (mask >> reg)
   4301 	as_tsktsk (_("register list not in ascending order"));
   4302       mask |= 1 << reg;
   4303 
   4304       if (*input_line_pointer == '-')
   4305 	{
   4306 	  input_line_pointer++;
   4307 	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
   4308 	  if (hi_reg == FAIL)
   4309 	    {
   4310 	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
   4311 	      goto error;
   4312 	    }
   4313 	  else if (reg >= hi_reg)
   4314 	    {
   4315 	      as_bad (_("bad register range"));
   4316 	      goto error;
   4317 	    }
   4318 	  for (; reg < hi_reg; reg++)
   4319 	    mask |= 1 << reg;
   4320 	}
   4321     }
   4322   while (skip_past_comma (&input_line_pointer) != FAIL);
   4323 
   4324   skip_past_char (&input_line_pointer, '}');
   4325 
   4326   demand_empty_rest_of_line ();
   4327 
   4328   /* Generate any deferred opcodes because we're going to be looking at
   4329      the list.	*/
   4330   flush_pending_unwind ();
   4331 
   4332   for (reg = 0; reg < 16; reg++)
   4333     {
   4334       if (mask & (1 << reg))
   4335 	unwind.frame_size += 4;
   4336     }
   4337   op = 0xc700 | mask;
   4338   add_unwind_opcode (op, 2);
   4339   return;
   4340 error:
   4341   ignore_rest_of_line ();
   4342 }
   4343 
   4344 
   4345 /* Parse an unwind_save directive.
   4346    If the argument is non-zero, this is a .vsave directive.  */
   4347 
   4348 static void
   4349 s_arm_unwind_save (int arch_v6)
   4350 {
   4351   char *peek;
   4352   struct reg_entry *reg;
   4353   bfd_boolean had_brace = FALSE;
   4354 
   4355   if (!unwind.proc_start)
   4356     as_bad (MISSING_FNSTART);
   4357 
   4358   /* Figure out what sort of save we have.  */
   4359   peek = input_line_pointer;
   4360 
   4361   if (*peek == '{')
   4362     {
   4363       had_brace = TRUE;
   4364       peek++;
   4365     }
   4366 
   4367   reg = arm_reg_parse_multi (&peek);
   4368 
   4369   if (!reg)
   4370     {
   4371       as_bad (_("register expected"));
   4372       ignore_rest_of_line ();
   4373       return;
   4374     }
   4375 
   4376   switch (reg->type)
   4377     {
   4378     case REG_TYPE_FN:
   4379       if (had_brace)
   4380 	{
   4381 	  as_bad (_("FPA .unwind_save does not take a register list"));
   4382 	  ignore_rest_of_line ();
   4383 	  return;
   4384 	}
   4385       input_line_pointer = peek;
   4386       s_arm_unwind_save_fpa (reg->number);
   4387       return;
   4388 
   4389     case REG_TYPE_RN:
   4390       s_arm_unwind_save_core ();
   4391       return;
   4392 
   4393     case REG_TYPE_VFD:
   4394       if (arch_v6)
   4395 	s_arm_unwind_save_vfp_armv6 ();
   4396       else
   4397 	s_arm_unwind_save_vfp ();
   4398       return;
   4399 
   4400     case REG_TYPE_MMXWR:
   4401       s_arm_unwind_save_mmxwr ();
   4402       return;
   4403 
   4404     case REG_TYPE_MMXWCG:
   4405       s_arm_unwind_save_mmxwcg ();
   4406       return;
   4407 
   4408     default:
   4409       as_bad (_(".unwind_save does not support this kind of register"));
   4410       ignore_rest_of_line ();
   4411     }
   4412 }
   4413 
   4414 
   4415 /* Parse an unwind_movsp directive.  */
   4416 
   4417 static void
   4418 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
   4419 {
   4420   int reg;
   4421   valueT op;
   4422   int offset;
   4423 
   4424   if (!unwind.proc_start)
   4425     as_bad (MISSING_FNSTART);
   4426 
   4427   reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4428   if (reg == FAIL)
   4429     {
   4430       as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
   4431       ignore_rest_of_line ();
   4432       return;
   4433     }
   4434 
   4435   /* Optional constant.	 */
   4436   if (skip_past_comma (&input_line_pointer) != FAIL)
   4437     {
   4438       if (immediate_for_directive (&offset) == FAIL)
   4439 	return;
   4440     }
   4441   else
   4442     offset = 0;
   4443 
   4444   demand_empty_rest_of_line ();
   4445 
   4446   if (reg == REG_SP || reg == REG_PC)
   4447     {
   4448       as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
   4449       return;
   4450     }
   4451 
   4452   if (unwind.fp_reg != REG_SP)
   4453     as_bad (_("unexpected .unwind_movsp directive"));
   4454 
   4455   /* Generate opcode to restore the value.  */
   4456   op = 0x90 | reg;
   4457   add_unwind_opcode (op, 1);
   4458 
   4459   /* Record the information for later.	*/
   4460   unwind.fp_reg = reg;
   4461   unwind.fp_offset = unwind.frame_size - offset;
   4462   unwind.sp_restored = 1;
   4463 }
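/* Usage sketch (illustrative): after a prologue instruction that copies
   the stack pointer, e.g.

       mov	r6, sp
       .movsp	r6

   the opcode 0x96 ("vsp = r6") is emitted above, and the register and
   current frame offset are recorded in unwind.fp_reg/fp_offset for the
   directives that follow.  */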
   4464 
   4465 /* Parse an unwind_pad directive.  */
   4466 
   4467 static void
   4468 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
   4469 {
   4470   int offset;
   4471 
   4472   if (!unwind.proc_start)
   4473     as_bad (MISSING_FNSTART);
   4474 
   4475   if (immediate_for_directive (&offset) == FAIL)
   4476     return;
   4477 
   4478   if (offset & 3)
   4479     {
   4480       as_bad (_("stack increment must be multiple of 4"));
   4481       ignore_rest_of_line ();
   4482       return;
   4483     }
   4484 
   4485   /* Don't generate any opcodes, just record the details for later.  */
   4486   unwind.frame_size += offset;
   4487   unwind.pending_offset += offset;
   4488 
   4489   demand_empty_rest_of_line ();
   4490 }
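/* Illustrative example: ".pad #16" after "sub sp, sp, #16" only bumps
   frame_size and pending_offset by 16 here; the vsp-increment opcode
   itself (0x03 under the usual EHABI 00xxxxxx encoding) is emitted only
   when the pending adjustment is flushed.  */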
   4491 
   4492 /* Parse an unwind_setfp directive.  */
   4493 
   4494 static void
   4495 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
   4496 {
   4497   int sp_reg;
   4498   int fp_reg;
   4499   int offset;
   4500 
   4501   if (!unwind.proc_start)
   4502     as_bad (MISSING_FNSTART);
   4503 
   4504   fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4505   if (skip_past_comma (&input_line_pointer) == FAIL)
   4506     sp_reg = FAIL;
   4507   else
   4508     sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
   4509 
   4510   if (fp_reg == FAIL || sp_reg == FAIL)
   4511     {
   4512       as_bad (_("expected <reg>, <reg>"));
   4513       ignore_rest_of_line ();
   4514       return;
   4515     }
   4516 
   4517   /* Optional constant.	 */
   4518   if (skip_past_comma (&input_line_pointer) != FAIL)
   4519     {
   4520       if (immediate_for_directive (&offset) == FAIL)
   4521 	return;
   4522     }
   4523   else
   4524     offset = 0;
   4525 
   4526   demand_empty_rest_of_line ();
   4527 
   4528   if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
   4529     {
   4530 	      as_bad (_("register must be either sp or set by a previous "
   4531 		"unwind_movsp directive"));
   4532       return;
   4533     }
   4534 
   4535   /* Don't generate any opcodes, just record the information for later.	 */
   4536   unwind.fp_reg = fp_reg;
   4537   unwind.fp_used = 1;
   4538   if (sp_reg == REG_SP)
   4539     unwind.fp_offset = unwind.frame_size - offset;
   4540   else
   4541     unwind.fp_offset -= offset;
   4542 }
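/* Usage sketch (illustrative): a frame-pointer prologue such as

       add	r7, sp, #8
       .setfp	r7, sp, #8

   records fp_reg = r7 and fp_offset = frame_size - 8 above.  No opcode
   is emitted at this point; the restore sequence is produced later,
   when the unwind opcodes for the function are finalised.  */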
   4543 
   4544 /* Parse an unwind_raw directive.  */
   4545 
   4546 static void
   4547 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
   4548 {
   4549   expressionS exp;
   4550   /* This is an arbitrary limit.	 */
   4551   unsigned char op[16];
   4552   int count;
   4553 
   4554   if (!unwind.proc_start)
   4555     as_bad (MISSING_FNSTART);
   4556 
   4557   expression (&exp);
   4558   if (exp.X_op == O_constant
   4559       && skip_past_comma (&input_line_pointer) != FAIL)
   4560     {
   4561       unwind.frame_size += exp.X_add_number;
   4562       expression (&exp);
   4563     }
   4564   else
   4565     exp.X_op = O_illegal;
   4566 
   4567   if (exp.X_op != O_constant)
   4568     {
   4569       as_bad (_("expected <offset>, <opcode>"));
   4570       ignore_rest_of_line ();
   4571       return;
   4572     }
   4573 
   4574   count = 0;
   4575 
   4576   /* Parse the opcode.	*/
   4577   for (;;)
   4578     {
   4579       if (count >= 16)
   4580 	{
   4581 	  as_bad (_("unwind opcode too long"));
   4582 	  ignore_rest_of_line ();
	  return;  /* Do not fall through and overflow op[].  */
   4583 	}
   4584       if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
   4585 	{
   4586 	  as_bad (_("invalid unwind opcode"));
   4587 	  ignore_rest_of_line ();
   4588 	  return;
   4589 	}
   4590       op[count++] = exp.X_add_number;
   4591 
   4592       /* Parse the next byte.  */
   4593       if (skip_past_comma (&input_line_pointer) == FAIL)
   4594 	break;
   4595 
   4596       expression (&exp);
   4597     }
   4598 
   4599   /* Add the opcode bytes in reverse order.  */
   4600   while (count--)
   4601     add_unwind_opcode (op[count], 1);
   4602 
   4603   demand_empty_rest_of_line ();
   4604 }
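/* Illustrative examples: ".unwind_raw 0, 0xb0" records no frame-size
   change and appends the single raw byte 0xB0 (the EHABI "finish"
   opcode), while ".unwind_raw 4, 0xb1, 0x01" records a 4-byte
   adjustment plus the two-byte "pop {r0}" opcode, written on the
   directive in table order.  */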
   4605 
   4606 
   4607 /* Parse a .eabi_attribute directive.  */
   4608 
   4609 static void
   4610 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
   4611 {
   4612   int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
   4613 
   4614   if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
   4615     attributes_set_explicitly[tag] = 1;
   4616 }
   4617 
   4618 /* Emit a tls fix for the symbol.  */
   4619 
   4620 static void
   4621 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
   4622 {
   4623   char *p;
   4624   expressionS exp;
   4625 #ifdef md_flush_pending_output
   4626   md_flush_pending_output ();
   4627 #endif
   4628 
   4629 #ifdef md_cons_align
   4630   md_cons_align (4);
   4631 #endif
   4632 
   4633   /* Since we're just labelling the code, there's no need to define a
   4634      mapping symbol.  */
   4635   expression (&exp);
   4636   p = obstack_next_free (&frchain_now->frch_obstack);
   4637   fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
   4638 	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
   4639 	       : BFD_RELOC_ARM_TLS_DESCSEQ);
   4640 }
   4641 #endif /* OBJ_ELF */
   4642 
   4643 static void s_arm_arch (int);
   4644 static void s_arm_object_arch (int);
   4645 static void s_arm_cpu (int);
   4646 static void s_arm_fpu (int);
   4647 static void s_arm_arch_extension (int);
   4648 
   4649 #ifdef TE_PE
   4650 
   4651 static void
   4652 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
   4653 {
   4654   expressionS exp;
   4655 
   4656   do
   4657     {
   4658       expression (&exp);
   4659       if (exp.X_op == O_symbol)
   4660 	exp.X_op = O_secrel;
   4661 
   4662       emit_expr (&exp, 4);
   4663     }
   4664   while (*input_line_pointer++ == ',');
   4665 
   4666   input_line_pointer--;
   4667   demand_empty_rest_of_line ();
   4668 }
   4669 #endif /* TE_PE */
   4670 
   4671 /* This table describes all the machine specific pseudo-ops the assembler
   4672    has to support.  The fields are:
   4673      pseudo-op name without dot
   4674      function to call to execute this pseudo-op
   4675      Integer arg to pass to the function.  */
   4676 
   4677 const pseudo_typeS md_pseudo_table[] =
   4678 {
   4679   /* Never called because '.req' does not start a line.	 */
   4680   { "req",	   s_req,	  0 },
   4681   /* Following two are likewise never called.  */
   4682   { "dn",	   s_dn,          0 },
   4683   { "qn",          s_qn,          0 },
   4684   { "unreq",	   s_unreq,	  0 },
   4685   { "bss",	   s_bss,	  0 },
   4686   { "align",	   s_align_ptwo,  2 },
   4687   { "arm",	   s_arm,	  0 },
   4688   { "thumb",	   s_thumb,	  0 },
   4689   { "code",	   s_code,	  0 },
   4690   { "force_thumb", s_force_thumb, 0 },
   4691   { "thumb_func",  s_thumb_func,  0 },
   4692   { "thumb_set",   s_thumb_set,	  0 },
   4693   { "even",	   s_even,	  0 },
   4694   { "ltorg",	   s_ltorg,	  0 },
   4695   { "pool",	   s_ltorg,	  0 },
   4696   { "syntax",	   s_syntax,	  0 },
   4697   { "cpu",	   s_arm_cpu,	  0 },
   4698   { "arch",	   s_arm_arch,	  0 },
   4699   { "object_arch", s_arm_object_arch,	0 },
   4700   { "fpu",	   s_arm_fpu,	  0 },
   4701   { "arch_extension", s_arm_arch_extension, 0 },
   4702 #ifdef OBJ_ELF
   4703   { "word",	        s_arm_elf_cons, 4 },
   4704   { "long",	        s_arm_elf_cons, 4 },
   4705   { "inst.n",           s_arm_elf_inst, 2 },
   4706   { "inst.w",           s_arm_elf_inst, 4 },
   4707   { "inst",             s_arm_elf_inst, 0 },
   4708   { "rel31",	        s_arm_rel31,	  0 },
   4709   { "fnstart",		s_arm_unwind_fnstart,	0 },
   4710   { "fnend",		s_arm_unwind_fnend,	0 },
   4711   { "cantunwind",	s_arm_unwind_cantunwind, 0 },
   4712   { "personality",	s_arm_unwind_personality, 0 },
   4713   { "personalityindex",	s_arm_unwind_personalityindex, 0 },
   4714   { "handlerdata",	s_arm_unwind_handlerdata, 0 },
   4715   { "save",		s_arm_unwind_save,	0 },
   4716   { "vsave",		s_arm_unwind_save,	1 },
   4717   { "movsp",		s_arm_unwind_movsp,	0 },
   4718   { "pad",		s_arm_unwind_pad,	0 },
   4719   { "setfp",		s_arm_unwind_setfp,	0 },
   4720   { "unwind_raw",	s_arm_unwind_raw,	0 },
   4721   { "eabi_attribute",	s_arm_eabi_attribute,	0 },
   4722   { "tlsdescseq",	s_arm_tls_descseq,      0 },
   4723 #else
   4724   { "word",	   cons, 4},
   4725 
   4726   /* These are used for dwarf.  */
   4727   {"2byte", cons, 2},
   4728   {"4byte", cons, 4},
   4729   {"8byte", cons, 8},
   4730   /* These are used for dwarf2.  */
   4731   { "file", (void (*) (int)) dwarf2_directive_file, 0 },
   4732   { "loc",  dwarf2_directive_loc,  0 },
   4733   { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
   4734 #endif
   4735   { "extend",	   float_cons, 'x' },
   4736   { "ldouble",	   float_cons, 'x' },
   4737   { "packed",	   float_cons, 'p' },
   4738 #ifdef TE_PE
   4739   {"secrel32", pe_directive_secrel, 0},
   4740 #endif
   4741 
   4742   /* These are for compatibility with CodeComposer Studio.  */
   4743   {"ref",          s_ccs_ref,        0},
   4744   {"def",          s_ccs_def,        0},
   4745   {"asmfunc",      s_ccs_asmfunc,    0},
   4746   {"endasmfunc",   s_ccs_endasmfunc, 0},
   4747 
   4748   { 0, 0, 0 }
   4749 };
   4750 
   4751 /* Parser functions used exclusively in instruction operands.  */
   4753 
   4754 /* Generic immediate-value read function for use in insn parsing.
   4755    STR points to the beginning of the immediate (the leading #);
   4756    VAL receives the value; if the value is outside [MIN, MAX]
   4757    issue an error.  PREFIX_OPT is true if the immediate prefix is
   4758    optional.  */
   4759 
   4760 static int
   4761 parse_immediate (char **str, int *val, int min, int max,
   4762 		 bfd_boolean prefix_opt)
   4763 {
   4764   expressionS exp;
   4765   my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
   4766   if (exp.X_op != O_constant)
   4767     {
   4768       inst.error = _("constant expression required");
   4769       return FAIL;
   4770     }
   4771 
   4772   if (exp.X_add_number < min || exp.X_add_number > max)
   4773     {
   4774       inst.error = _("immediate value out of range");
   4775       return FAIL;
   4776     }
   4777 
   4778   *val = exp.X_add_number;
   4779   return SUCCESS;
   4780 }
   4781 
   4782 /* Less-generic immediate-value read function with the possibility of loading a
   4783    big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   4784    instructions. Puts the result directly in inst.operands[i].  */
   4785 
   4786 static int
   4787 parse_big_immediate (char **str, int i, expressionS *in_exp,
   4788 		     bfd_boolean allow_symbol_p)
   4789 {
   4790   expressionS exp;
   4791   expressionS *exp_p = in_exp ? in_exp : &exp;
   4792   char *ptr = *str;
   4793 
   4794   my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
   4795 
   4796   if (exp_p->X_op == O_constant)
   4797     {
   4798       inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
   4799       /* If we're on a 64-bit host, then a 64-bit number can be returned using
   4800 	 O_constant.  We have to be careful not to break compilation for
   4801 	 32-bit X_add_number, though.  */
   4802       if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
   4803 	{
   4804 	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
   4805 	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
   4806 				  & 0xffffffff);
   4807 	  inst.operands[i].regisimm = 1;
   4808 	}
   4809     }
   4810   else if (exp_p->X_op == O_big
   4811 	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
   4812     {
   4813       unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
   4814 
   4815       /* Bignums have their least significant bits in
   4816 	 generic_bignum[0]. Make sure we put 32 bits in imm and
   4817 	 32 bits in reg,  in a (hopefully) portable way.  */
   4818       gas_assert (parts != 0);
   4819 
   4820       /* Make sure that the number is not too big.
   4821 	 PR 11972: Bignums can now be sign-extended to the
   4822 	 size of a .octa so check that the out of range bits
   4823 	 are all zero or all one.  */
   4824       if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
   4825 	{
   4826 	  LITTLENUM_TYPE m = -1;
   4827 
   4828 	  if (generic_bignum[parts * 2] != 0
   4829 	      && generic_bignum[parts * 2] != m)
   4830 	    return FAIL;
   4831 
   4832 	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
   4833 	    if (generic_bignum[j] != generic_bignum[j-1])
   4834 	      return FAIL;
   4835 	}
   4836 
   4837       inst.operands[i].imm = 0;
   4838       for (j = 0; j < parts; j++, idx++)
   4839 	inst.operands[i].imm |= generic_bignum[idx]
   4840 				<< (LITTLENUM_NUMBER_OF_BITS * j);
   4841       inst.operands[i].reg = 0;
   4842       for (j = 0; j < parts; j++, idx++)
   4843 	inst.operands[i].reg |= generic_bignum[idx]
   4844 				<< (LITTLENUM_NUMBER_OF_BITS * j);
   4845       inst.operands[i].regisimm = 1;
   4846     }
   4847   else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
   4848     return FAIL;
   4849 
   4850   *str = ptr;
   4851 
   4852   return SUCCESS;
   4853 }
   4854 
   4855 /* Returns the pseudo-register number of an FPA immediate constant,
   4856    or FAIL if there isn't a valid constant here.  */
   4857 
   4858 static int
   4859 parse_fpa_immediate (char ** str)
   4860 {
   4861   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   4862   char *	 save_in;
   4863   expressionS	 exp;
   4864   int		 i;
   4865   int		 j;
   4866 
   4867   /* First try and match exact strings, this is to guarantee
   4868      that some formats will work even for cross assembly.  */
   4869 
   4870   for (i = 0; fp_const[i]; i++)
   4871     {
   4872       if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
   4873 	{
   4874 	  char *start = *str;
   4875 
   4876 	  *str += strlen (fp_const[i]);
   4877 	  if (is_end_of_line[(unsigned char) **str])
   4878 	    return i + 8;
   4879 	  *str = start;
   4880 	}
   4881     }
   4882 
   4883   /* Just because we didn't get a match doesn't mean that the constant
   4884      isn't valid, just that it is in a format that we don't
   4885      automatically recognize.  Try parsing it with the standard
   4886      expression routines.  */
   4887 
   4888   memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
   4889 
   4890   /* Look for a raw floating point number.  */
   4891   if ((save_in = atof_ieee (*str, 'x', words)) != NULL
   4892       && is_end_of_line[(unsigned char) *save_in])
   4893     {
   4894       for (i = 0; i < NUM_FLOAT_VALS; i++)
   4895 	{
   4896 	  for (j = 0; j < MAX_LITTLENUMS; j++)
   4897 	    {
   4898 	      if (words[j] != fp_values[i][j])
   4899 		break;
   4900 	    }
   4901 
   4902 	  if (j == MAX_LITTLENUMS)
   4903 	    {
   4904 	      *str = save_in;
   4905 	      return i + 8;
   4906 	    }
   4907 	}
   4908     }
   4909 
   4910   /* Try and parse a more complex expression, this will probably fail
   4911      unless the code uses a floating point prefix (eg "0f").  */
   4912   save_in = input_line_pointer;
   4913   input_line_pointer = *str;
   4914   if (expression (&exp) == absolute_section
   4915       && exp.X_op == O_big
   4916       && exp.X_add_number < 0)
   4917     {
   4918       /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
   4919 	 Ditto for 15.	*/
   4920 #define X_PRECISION 5
   4921 #define E_PRECISION 15L
   4922       if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
   4923 	{
   4924 	  for (i = 0; i < NUM_FLOAT_VALS; i++)
   4925 	    {
   4926 	      for (j = 0; j < MAX_LITTLENUMS; j++)
   4927 		{
   4928 		  if (words[j] != fp_values[i][j])
   4929 		    break;
   4930 		}
   4931 
   4932 	      if (j == MAX_LITTLENUMS)
   4933 		{
   4934 		  *str = input_line_pointer;
   4935 		  input_line_pointer = save_in;
   4936 		  return i + 8;
   4937 		}
   4938 	    }
   4939 	}
   4940     }
   4941 
   4942   *str = input_line_pointer;
   4943   input_line_pointer = save_in;
   4944   inst.error = _("invalid FPA immediate expression");
   4945   return FAIL;
   4946 }
   4947 
   4948 /* Returns 1 if a number has "quarter-precision" float format
   4949    0baBbbbbbc defgh000 00000000 00000000.  */
   4950 
   4951 static int
   4952 is_quarter_float (unsigned imm)
   4953 {
   4954   int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
   4955   return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
   4956 }
   4957 
   4958 
   4959 /* Detect the presence of a floating point or integer zero constant,
   4960    i.e. #0.0 or #0.  */
   4961 
   4962 static bfd_boolean
   4963 parse_ifimm_zero (char **in)
   4964 {
   4965   int error_code;
   4966 
   4967   if (!is_immediate_prefix (**in))
   4968     return FALSE;
   4969 
   4970   ++*in;
   4971 
   4972   /* Accept #0x0 as a synonym for #0.  */
   4973   if (strncmp (*in, "0x", 2) == 0)
   4974     {
   4975       int val;
   4976       if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
   4977         return FALSE;
   4978       return TRUE;
   4979     }
   4980 
   4981   error_code = atof_generic (in, ".", EXP_CHARS,
   4982                              &generic_floating_point_number);
   4983 
   4984   if (!error_code
   4985       && generic_floating_point_number.sign == '+'
   4986       && (generic_floating_point_number.low
   4987           > generic_floating_point_number.leader))
   4988     return TRUE;
   4989 
   4990   return FALSE;
   4991 }
   4992 
   4993 /* Parse an 8-bit "quarter-precision" floating point number of the form:
   4994    0baBbbbbbc defgh000 00000000 00000000.
   4995    The zero and minus-zero cases need special handling, since they can't be
   4996    encoded in the "quarter-precision" float format, but can nonetheless be
   4997    loaded as integer constants.  */
   4998 
   4999 static unsigned
   5000 parse_qfloat_immediate (char **ccp, int *immed)
   5001 {
   5002   char *str = *ccp;
   5003   char *fpnum;
   5004   LITTLENUM_TYPE words[MAX_LITTLENUMS];
   5005   int found_fpchar = 0;
   5006 
   5007   skip_past_char (&str, '#');
   5008 
   5009   /* We must not accidentally parse an integer as a floating-point number. Make
   5010      sure that the value we parse is not an integer by checking for special
   5011      characters '.' or 'e'.
   5012      FIXME: This is a horrible hack, but doing better is tricky because type
   5013      information isn't in a very usable state at parse time.  */
   5014   fpnum = str;
   5015   skip_whitespace (fpnum);
   5016 
   5017   if (strncmp (fpnum, "0x", 2) == 0)
   5018     return FAIL;
   5019   else
   5020     {
   5021       for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
   5022 	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
   5023 	  {
   5024 	    found_fpchar = 1;
   5025 	    break;
   5026 	  }
   5027 
   5028       if (!found_fpchar)
   5029 	return FAIL;
   5030     }
   5031 
   5032   if ((str = atof_ieee (str, 's', words)) != NULL)
   5033     {
   5034       unsigned fpword = 0;
   5035       int i;
   5036 
   5037       /* Our FP word must be 32 bits (single-precision FP).  */
   5038       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
   5039 	{
   5040 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
   5041 	  fpword |= words[i];
   5042 	}
   5043 
   5044       if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
   5045 	*immed = fpword;
   5046       else
   5047 	return FAIL;
   5048 
   5049       *ccp = str;
   5050 
   5051       return SUCCESS;
   5052     }
   5053 
   5054   return FAIL;
   5055 }
   5056 
   5057 /* Shift operands.  */
   5058 enum shift_kind
   5059 {
   5060   SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
   5061 };
   5062 
   5063 struct asm_shift_name
   5064 {
   5065   const char	  *name;
   5066   enum shift_kind  kind;
   5067 };
   5068 
   5069 /* Third argument to parse_shift.  */
   5070 enum parse_shift_mode
   5071 {
   5072   NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
   5073   SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
   5074   SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
   5075   SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
   5076   SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
   5077 };
   5078 
   5079 /* Parse a <shift> specifier on an ARM data processing instruction.
   5080    This has three forms:
   5081 
   5082      (LSL|LSR|ASL|ASR|ROR) Rs
   5083      (LSL|LSR|ASL|ASR|ROR) #imm
   5084      RRX
   5085 
   5086    Note that ASL is assimilated to LSL in the instruction encoding, and
   5087    RRX to ROR #0 (which cannot be written as such).  */
   5088 
   5089 static int
   5090 parse_shift (char **str, int i, enum parse_shift_mode mode)
   5091 {
   5092   const struct asm_shift_name *shift_name;
   5093   enum shift_kind shift;
   5094   char *s = *str;
   5095   char *p = s;
   5096   int reg;
   5097 
   5098   for (p = *str; ISALPHA (*p); p++)
   5099     ;
   5100 
   5101   if (p == *str)
   5102     {
   5103       inst.error = _("shift expression expected");
   5104       return FAIL;
   5105     }
   5106 
   5107   shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
   5108 							    p - *str);
   5109 
   5110   if (shift_name == NULL)
   5111     {
   5112       inst.error = _("shift expression expected");
   5113       return FAIL;
   5114     }
   5115 
   5116   shift = shift_name->kind;
   5117 
   5118   switch (mode)
   5119     {
   5120     case NO_SHIFT_RESTRICT:
   5121     case SHIFT_IMMEDIATE:   break;
   5122 
   5123     case SHIFT_LSL_OR_ASR_IMMEDIATE:
   5124       if (shift != SHIFT_LSL && shift != SHIFT_ASR)
   5125 	{
   5126 	  inst.error = _("'LSL' or 'ASR' required");
   5127 	  return FAIL;
   5128 	}
   5129       break;
   5130 
   5131     case SHIFT_LSL_IMMEDIATE:
   5132       if (shift != SHIFT_LSL)
   5133 	{
   5134 	  inst.error = _("'LSL' required");
   5135 	  return FAIL;
   5136 	}
   5137       break;
   5138 
   5139     case SHIFT_ASR_IMMEDIATE:
   5140       if (shift != SHIFT_ASR)
   5141 	{
   5142 	  inst.error = _("'ASR' required");
   5143 	  return FAIL;
   5144 	}
   5145       break;
   5146 
   5147     default: abort ();
   5148     }
   5149 
   5150   if (shift != SHIFT_RRX)
   5151     {
   5152       /* Whitespace can appear here if the next thing is a bare digit.	*/
   5153       skip_whitespace (p);
   5154 
   5155       if (mode == NO_SHIFT_RESTRICT
   5156 	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5157 	{
   5158 	  inst.operands[i].imm = reg;
   5159 	  inst.operands[i].immisreg = 1;
   5160 	}
   5161       else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5162 	return FAIL;
   5163     }
   5164   inst.operands[i].shift_kind = shift;
   5165   inst.operands[i].shifted = 1;
   5166   *str = p;
   5167   return SUCCESS;
   5168 }
   5169 
   5170 /* Parse a <shifter_operand> for an ARM data processing instruction:
   5171 
   5172       #<immediate>
   5173       #<immediate>, <rotate>
   5174       <Rm>
   5175       <Rm>, <shift>
   5176 
   5177    where <shift> is defined by parse_shift above, and <rotate> is a
   5178    multiple of 2 between 0 and 30.  Validation of immediate operands
   5179    is deferred to md_apply_fix.  */
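        /* Illustrative operands: "#255", "#255, 8" (255 rotated right by 8,
           encoded below as 255 | (8 << 7)), "r3", and "r3, LSL #2".  */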
   5180 
   5181 static int
   5182 parse_shifter_operand (char **str, int i)
   5183 {
   5184   int value;
   5185   expressionS exp;
   5186 
   5187   if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
   5188     {
   5189       inst.operands[i].reg = value;
   5190       inst.operands[i].isreg = 1;
   5191 
   5192       /* parse_shift will override this if appropriate */
   5193       inst.reloc.exp.X_op = O_constant;
   5194       inst.reloc.exp.X_add_number = 0;
   5195 
   5196       if (skip_past_comma (str) == FAIL)
   5197 	return SUCCESS;
   5198 
   5199       /* Shift operation on register.  */
   5200       return parse_shift (str, i, NO_SHIFT_RESTRICT);
   5201     }
   5202 
   5203   if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
   5204     return FAIL;
   5205 
   5206   if (skip_past_comma (str) == SUCCESS)
   5207     {
   5208       /* #x, y -- ie explicit rotation by Y.  */
   5209       if (my_get_expression (&exp, str, GE_NO_PREFIX))
   5210 	return FAIL;
   5211 
   5212       if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
   5213 	{
   5214 	  inst.error = _("constant expression expected");
   5215 	  return FAIL;
   5216 	}
   5217 
   5218       value = exp.X_add_number;
   5219       if (value < 0 || value > 30 || value % 2 != 0)
   5220 	{
   5221 	  inst.error = _("invalid rotation");
   5222 	  return FAIL;
   5223 	}
   5224       if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
   5225 	{
   5226 	  inst.error = _("invalid constant");
   5227 	  return FAIL;
   5228 	}
   5229 
   5230       /* Encode as specified.  */
   5231       inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
   5232       return SUCCESS;
   5233     }
   5234 
   5235   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   5236   inst.reloc.pc_rel = 0;
   5237   return SUCCESS;
   5238 }
   5239 
   5240 /* Group relocation information.  Each entry in the table contains the
   5241    textual name of the relocation, as it may appear in assembler source
   5242    (where it must be followed by a colon).
   5243    Along with this textual name are the relocation codes to be used if
   5244    the corresponding instruction is an ALU instruction (ADD or SUB only),
   5245    an LDR, an LDRS, or an LDC.  */
   5246 
   5247 struct group_reloc_table_entry
   5248 {
   5249   const char *name;
   5250   int alu_code;
   5251   int ldr_code;
   5252   int ldrs_code;
   5253   int ldc_code;
   5254 };
   5255 
   5256 typedef enum
   5257 {
   5258   /* Varieties of non-ALU group relocation.  */
   5259 
   5260   GROUP_LDR,
   5261   GROUP_LDRS,
   5262   GROUP_LDC
   5263 } group_reloc_type;
   5264 
   5265 static struct group_reloc_table_entry group_reloc_table[] =
   5266   { /* Program counter relative: */
   5267     { "pc_g0_nc",
   5268       BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
   5269       0,				/* LDR */
   5270       0,				/* LDRS */
   5271       0 },				/* LDC */
   5272     { "pc_g0",
   5273       BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
   5274       BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
   5275       BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
   5276       BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
   5277     { "pc_g1_nc",
   5278       BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
   5279       0,				/* LDR */
   5280       0,				/* LDRS */
   5281       0 },				/* LDC */
   5282     { "pc_g1",
   5283       BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
   5284       BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
   5285       BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
   5286       BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
   5287     { "pc_g2",
   5288       BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
   5289       BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
   5290       BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
   5291       BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
   5292     /* Section base relative */
   5293     { "sb_g0_nc",
   5294       BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
   5295       0,				/* LDR */
   5296       0,				/* LDRS */
   5297       0 },				/* LDC */
   5298     { "sb_g0",
   5299       BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
   5300       BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
   5301       BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
   5302       BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
   5303     { "sb_g1_nc",
   5304       BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
   5305       0,				/* LDR */
   5306       0,				/* LDRS */
   5307       0 },				/* LDC */
   5308     { "sb_g1",
   5309       BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
   5310       BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
   5311       BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
   5312       BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
   5313     { "sb_g2",
   5314       BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
   5315       BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
   5316       BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
   5317       BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
   5318     /* Absolute thumb alu relocations.  */
   5319     { "lower0_7",
   5320       BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
   5321       0,				/* LDR.  */
   5322       0,				/* LDRS.  */
   5323       0 },				/* LDC.  */
   5324     { "lower8_15",
   5325       BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
   5326       0,				/* LDR.  */
   5327       0,				/* LDRS.  */
   5328       0 },				/* LDC.  */
   5329     { "upper0_7",
   5330       BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
   5331       0,				/* LDR.  */
   5332       0,				/* LDRS.  */
   5333       0 },				/* LDC.  */
   5334     { "upper8_15",
   5335       BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
   5336       0,				/* LDR.  */
   5337       0,				/* LDRS.  */
   5338       0 } };				/* LDC.  */
   5339 
   5340 /* Given the address of a pointer pointing to the textual name of a group
   5341    relocation, as it may appear in assembler source, attempt to find its
   5342    details in group_reloc_table.  On success, *STR is updated to point
   5343    past the trailing colon, *OUT is set to point at the relevant
   5344    group_reloc_table entry, and SUCCESS is returned; otherwise FAIL is
   5345    returned.  */
   5346 
   5347 static int
   5348 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
   5349 {
   5350   unsigned int i;
   5351   for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
   5352     {
   5353       int length = strlen (group_reloc_table[i].name);
   5354 
   5355       if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
   5356 	  && (*str)[length] == ':')
   5357 	{
   5358 	  *out = &group_reloc_table[i];
   5359 	  *str += (length + 1);
   5360 	  return SUCCESS;
   5361 	}
   5362     }
   5363 
   5364   return FAIL;
   5365 }
   5366 
   5367 /* Parse a <shifter_operand> for an ARM data processing instruction
   5368    (as for parse_shifter_operand) where group relocations are allowed:
   5369 
   5370       #<immediate>
   5371       #<immediate>, <rotate>
   5372       #:<group_reloc>:<expression>
   5373       <Rm>
   5374       <Rm>, <shift>
   5375 
   5376    where <group_reloc> is one of the strings defined in group_reloc_table.
   5377    The hashes are optional.
   5378 
   5379    Everything else is as for parse_shifter_operand.  */
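        /* An illustrative source form (not exhaustive):
             add  r0, r1, #:pc_g0_nc:(sym)
           where "pc_g0_nc" is looked up in group_reloc_table above.  */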
   5380 
   5381 static parse_operand_result
   5382 parse_shifter_operand_group_reloc (char **str, int i)
   5383 {
   5384   /* Determine if we have the sequence of characters #: or just :
   5385      coming next.  If we do, then we check for a group relocation.
   5386      If we don't, punt the whole lot to parse_shifter_operand.  */
   5387 
   5388   if (((*str)[0] == '#' && (*str)[1] == ':')
   5389       || (*str)[0] == ':')
   5390     {
   5391       struct group_reloc_table_entry *entry;
   5392 
   5393       if ((*str)[0] == '#')
   5394 	(*str) += 2;
   5395       else
   5396 	(*str)++;
   5397 
   5398       /* Try to parse a group relocation.  Anything else is an error.  */
   5399       if (find_group_reloc_table_entry (str, &entry) == FAIL)
   5400 	{
   5401 	  inst.error = _("unknown group relocation");
   5402 	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5403 	}
   5404 
   5405       /* We now have the group relocation table entry corresponding to
   5406 	 the name in the assembler source.  Next, we parse the expression.  */
   5407       if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
   5408 	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5409 
   5410       /* Record the relocation type (always the ALU variant here).  */
   5411       inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
   5412       gas_assert (inst.reloc.type != 0);
   5413 
   5414       return PARSE_OPERAND_SUCCESS;
   5415     }
   5416   else
   5417     return parse_shifter_operand (str, i) == SUCCESS
   5418 	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
   5419 
   5420   /* Never reached.  */
   5421 }
   5422 
   5423 /* Parse a Neon alignment expression.  Information is written to
   5424    inst.operands[i].  We assume the initial ':' has been skipped.
   5425 
   5426    align	.imm = align << 8, .immisalign=1, .preind=0  */
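        /* For example, the ":64" in "vld1.8 {d0}, [r0:64]" ends up here with
           the ':' already consumed, and 64 is stored as .imm = 64 << 8.  */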
   5427 static parse_operand_result
   5428 parse_neon_alignment (char **str, int i)
   5429 {
   5430   char *p = *str;
   5431   expressionS exp;
   5432 
   5433   my_get_expression (&exp, &p, GE_NO_PREFIX);
   5434 
   5435   if (exp.X_op != O_constant)
   5436     {
   5437       inst.error = _("alignment must be constant");
   5438       return PARSE_OPERAND_FAIL;
   5439     }
   5440 
   5441   inst.operands[i].imm = exp.X_add_number << 8;
   5442   inst.operands[i].immisalign = 1;
   5443   /* Alignments are not pre-indexes.  */
   5444   inst.operands[i].preind = 0;
   5445 
   5446   *str = p;
   5447   return PARSE_OPERAND_SUCCESS;
   5448 }
   5449 
   5450 /* Parse all forms of an ARM address expression.  Information is written
   5451    to inst.operands[i] and/or inst.reloc.
   5452 
   5453    Preindexed addressing (.preind=1):
   5454 
   5455    [Rn, #offset]       .reg=Rn .reloc.exp=offset
   5456    [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5457    [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5458 		       .shift_kind=shift .reloc.exp=shift_imm
   5459 
   5460    These three may have a trailing ! which causes .writeback to be set also.
   5461 
   5462    Postindexed addressing (.postind=1, .writeback=1):
   5463 
   5464    [Rn], #offset       .reg=Rn .reloc.exp=offset
   5465    [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5466    [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   5467 		       .shift_kind=shift .reloc.exp=shift_imm
   5468 
   5469    Unindexed addressing (.preind=0, .postind=0):
   5470 
   5471    [Rn], {option}      .reg=Rn .imm=option .immisreg=0
   5472 
   5473    Other:
   5474 
   5475    [Rn]{!}	       shorthand for [Rn,#0]{!}
   5476    =immediate	       .isreg=0 .reloc.exp=immediate
   5477    label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
   5478 
   5479   It is the caller's responsibility to check for addressing modes not
   5480   supported by the instruction, and to set inst.reloc.type.  */
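        /* Illustrative operands covering several of the forms above:
           "[r0, #4]", "[r1, -r2, LSL #2]!", "[r3], #-8", "[r4], {5}",
           "=0x12345678", and a bare label.  */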
   5481 
   5482 static parse_operand_result
   5483 parse_address_main (char **str, int i, int group_relocations,
   5484 		    group_reloc_type group_type)
   5485 {
   5486   char *p = *str;
   5487   int reg;
   5488 
   5489   if (skip_past_char (&p, '[') == FAIL)
   5490     {
   5491       if (skip_past_char (&p, '=') == FAIL)
   5492 	{
   5493 	  /* Bare address - translate to PC-relative offset.  */
   5494 	  inst.reloc.pc_rel = 1;
   5495 	  inst.operands[i].reg = REG_PC;
   5496 	  inst.operands[i].isreg = 1;
   5497 	  inst.operands[i].preind = 1;
   5498 
   5499 	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
   5500 	    return PARSE_OPERAND_FAIL;
   5501 	}
   5502       else if (parse_big_immediate (&p, i, &inst.reloc.exp,
   5503 				    /*allow_symbol_p=*/TRUE))
   5504 	return PARSE_OPERAND_FAIL;
   5505 
   5506       *str = p;
   5507       return PARSE_OPERAND_SUCCESS;
   5508     }
   5509 
   5510   /* PR gas/14887: Allow for whitespace after the opening bracket.  */
   5511   skip_whitespace (p);
   5512 
   5513   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   5514     {
   5515       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   5516       return PARSE_OPERAND_FAIL;
   5517     }
   5518   inst.operands[i].reg = reg;
   5519   inst.operands[i].isreg = 1;
   5520 
   5521   if (skip_past_comma (&p) == SUCCESS)
   5522     {
   5523       inst.operands[i].preind = 1;
   5524 
   5525       if (*p == '+') p++;
   5526       else if (*p == '-') p++, inst.operands[i].negative = 1;
   5527 
   5528       if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5529 	{
   5530 	  inst.operands[i].imm = reg;
   5531 	  inst.operands[i].immisreg = 1;
   5532 
   5533 	  if (skip_past_comma (&p) == SUCCESS)
   5534 	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
   5535 	      return PARSE_OPERAND_FAIL;
   5536 	}
   5537       else if (skip_past_char (&p, ':') == SUCCESS)
   5538 	{
   5539 	  /* FIXME: '@' should be used here, but it's filtered out by generic
   5540 	     code before we get to see it here. This may be subject to
   5541 	     change.  */
   5542 	  parse_operand_result result = parse_neon_alignment (&p, i);
   5543 
   5544 	  if (result != PARSE_OPERAND_SUCCESS)
   5545 	    return result;
   5546 	}
   5547       else
   5548 	{
   5549 	  if (inst.operands[i].negative)
   5550 	    {
   5551 	      inst.operands[i].negative = 0;
   5552 	      p--;
   5553 	    }
   5554 
   5555 	  if (group_relocations
   5556 	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
   5557 	    {
   5558 	      struct group_reloc_table_entry *entry;
   5559 
   5560 	      /* Skip over the #: or : sequence.  */
   5561 	      if (*p == '#')
   5562 		p += 2;
   5563 	      else
   5564 		p++;
   5565 
   5566 	      /* Try to parse a group relocation.  Anything else is an
   5567 		 error.  */
   5568 	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
   5569 		{
   5570 		  inst.error = _("unknown group relocation");
   5571 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5572 		}
   5573 
   5574 	      /* We now have the group relocation table entry corresponding to
   5575 		 the name in the assembler source.  Next, we parse the
   5576 		 expression.  */
   5577 	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
   5578 		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5579 
   5580 	      /* Record the relocation type.  */
   5581 	      switch (group_type)
   5582 		{
   5583 		  case GROUP_LDR:
   5584 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
   5585 		    break;
   5586 
   5587 		  case GROUP_LDRS:
   5588 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
   5589 		    break;
   5590 
   5591 		  case GROUP_LDC:
   5592 		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
   5593 		    break;
   5594 
   5595 		  default:
   5596 		    gas_assert (0);
   5597 		}
   5598 
   5599 	      if (inst.reloc.type == 0)
   5600 		{
   5601 		  inst.error = _("this group relocation is not allowed on this instruction");
   5602 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
   5603 		}
   5604 	    }
   5605 	  else
   5606 	    {
   5607 	      char *q = p;
   5608 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5609 		return PARSE_OPERAND_FAIL;
   5610 	      /* If the offset is 0, find out if it's a +0 or -0.  */
   5611 	      if (inst.reloc.exp.X_op == O_constant
   5612 		  && inst.reloc.exp.X_add_number == 0)
   5613 		{
   5614 		  skip_whitespace (q);
   5615 		  if (*q == '#')
   5616 		    {
   5617 		      q++;
   5618 		      skip_whitespace (q);
   5619 		    }
   5620 		  if (*q == '-')
   5621 		    inst.operands[i].negative = 1;
   5622 		}
   5623 	    }
   5624 	}
   5625     }
   5626   else if (skip_past_char (&p, ':') == SUCCESS)
   5627     {
   5628       /* FIXME: '@' should be used here, but it's filtered out by generic code
   5629 	 before we get to see it here. This may be subject to change.  */
   5630       parse_operand_result result = parse_neon_alignment (&p, i);
   5631 
   5632       if (result != PARSE_OPERAND_SUCCESS)
   5633 	return result;
   5634     }
   5635 
   5636   if (skip_past_char (&p, ']') == FAIL)
   5637     {
   5638       inst.error = _("']' expected");
   5639       return PARSE_OPERAND_FAIL;
   5640     }
   5641 
   5642   if (skip_past_char (&p, '!') == SUCCESS)
   5643     inst.operands[i].writeback = 1;
   5644 
   5645   else if (skip_past_comma (&p) == SUCCESS)
   5646     {
   5647       if (skip_past_char (&p, '{') == SUCCESS)
   5648 	{
   5649 	  /* [Rn], {expr} - unindexed, with option */
   5650 	  if (parse_immediate (&p, &inst.operands[i].imm,
   5651 			       0, 255, TRUE) == FAIL)
   5652 	    return PARSE_OPERAND_FAIL;
   5653 
   5654 	  if (skip_past_char (&p, '}') == FAIL)
   5655 	    {
   5656 	      inst.error = _("'}' expected at end of 'option' field");
   5657 	      return PARSE_OPERAND_FAIL;
   5658 	    }
   5659 	  if (inst.operands[i].preind)
   5660 	    {
   5661 	      inst.error = _("cannot combine index with option");
   5662 	      return PARSE_OPERAND_FAIL;
   5663 	    }
   5664 	  *str = p;
   5665 	  return PARSE_OPERAND_SUCCESS;
   5666 	}
   5667       else
   5668 	{
   5669 	  inst.operands[i].postind = 1;
   5670 	  inst.operands[i].writeback = 1;
   5671 
   5672 	  if (inst.operands[i].preind)
   5673 	    {
   5674 	      inst.error = _("cannot combine pre- and post-indexing");
   5675 	      return PARSE_OPERAND_FAIL;
   5676 	    }
   5677 
   5678 	  if (*p == '+') p++;
   5679 	  else if (*p == '-') p++, inst.operands[i].negative = 1;
   5680 
   5681 	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
   5682 	    {
   5683 	      /* We might be using the immediate for alignment already. If we
   5684 		 are, OR the register number into the low-order bits.  */
   5685 	      if (inst.operands[i].immisalign)
   5686 		inst.operands[i].imm |= reg;
   5687 	      else
   5688 		inst.operands[i].imm = reg;
   5689 	      inst.operands[i].immisreg = 1;
   5690 
   5691 	      if (skip_past_comma (&p) == SUCCESS)
   5692 		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
   5693 		  return PARSE_OPERAND_FAIL;
   5694 	    }
   5695 	  else
   5696 	    {
   5697 	      char *q = p;
   5698 	      if (inst.operands[i].negative)
   5699 		{
   5700 		  inst.operands[i].negative = 0;
   5701 		  p--;
   5702 		}
   5703 	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
   5704 		return PARSE_OPERAND_FAIL;
   5705 	      /* If the offset is 0, find out if it's a +0 or -0.  */
   5706 	      if (inst.reloc.exp.X_op == O_constant
   5707 		  && inst.reloc.exp.X_add_number == 0)
   5708 		{
   5709 		  skip_whitespace (q);
   5710 		  if (*q == '#')
   5711 		    {
   5712 		      q++;
   5713 		      skip_whitespace (q);
   5714 		    }
   5715 		  if (*q == '-')
   5716 		    inst.operands[i].negative = 1;
   5717 		}
   5718 	    }
   5719 	}
   5720     }
   5721 
   5722   /* If at this point neither .preind nor .postind is set, we have a
   5723      bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
   5724   if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
   5725     {
   5726       inst.operands[i].preind = 1;
   5727       inst.reloc.exp.X_op = O_constant;
   5728       inst.reloc.exp.X_add_number = 0;
   5729     }
   5730   *str = p;
   5731   return PARSE_OPERAND_SUCCESS;
   5732 }
   5733 
   5734 static int
   5735 parse_address (char **str, int i)
   5736 {
   5737   return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
   5738 	 ? SUCCESS : FAIL;
   5739 }
   5740 
   5741 static parse_operand_result
   5742 parse_address_group_reloc (char **str, int i, group_reloc_type type)
   5743 {
   5744   return parse_address_main (str, i, 1, type);
   5745 }
   5746 
   5747 /* Parse an operand for a MOVW or MOVT instruction.  */
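        /* For example, "movw r0, #:lower16:sym" selects BFD_RELOC_ARM_MOVW
           below, while a plain constant, as in "movt r0, #0x1234", must lie in
           the range 0..0xffff.  */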
   5748 static int
   5749 parse_half (char **str)
   5750 {
   5751   char * p;
   5752 
   5753   p = *str;
   5754   skip_past_char (&p, '#');
   5755   if (strncasecmp (p, ":lower16:", 9) == 0)
   5756     inst.reloc.type = BFD_RELOC_ARM_MOVW;
   5757   else if (strncasecmp (p, ":upper16:", 9) == 0)
   5758     inst.reloc.type = BFD_RELOC_ARM_MOVT;
   5759 
   5760   if (inst.reloc.type != BFD_RELOC_UNUSED)
   5761     {
   5762       p += 9;
   5763       skip_whitespace (p);
   5764     }
   5765 
   5766   if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
   5767     return FAIL;
   5768 
   5769   if (inst.reloc.type == BFD_RELOC_UNUSED)
   5770     {
   5771       if (inst.reloc.exp.X_op != O_constant)
   5772 	{
   5773 	  inst.error = _("constant expression expected");
   5774 	  return FAIL;
   5775 	}
   5776       if (inst.reloc.exp.X_add_number < 0
   5777 	  || inst.reloc.exp.X_add_number > 0xffff)
   5778 	{
   5779 	  inst.error = _("immediate value out of range");
   5780 	  return FAIL;
   5781 	}
   5782     }
   5783   *str = p;
   5784   return SUCCESS;
   5785 }
   5786 
   5787 /* Miscellaneous. */
   5788 
   5789 /* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   5790    or a bitmask suitable to be or-ed into the ARM msr instruction.  */
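        /* Illustrative operands: "CPSR_c", "SPSR_cxsf", "APSR_nzcvq", "APSR_g",
           and, for M-profile targets, special registers such as "PRIMASK"
           which are looked up in arm_v7m_psr_hsh.  */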
   5791 static int
   5792 parse_psr (char **str, bfd_boolean lhs)
   5793 {
   5794   char *p;
   5795   unsigned long psr_field;
   5796   const struct asm_psr *psr;
   5797   char *start;
   5798   bfd_boolean is_apsr = FALSE;
   5799   bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
   5800 
   5801   /* PR gas/12698:  If the user has specified -march=all then m_profile will
   5802      be TRUE, but we want to ignore it in this case as we are building for any
   5803      CPU type, including non-m variants.  */
   5804   if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
   5805     m_profile = FALSE;
   5806 
   5807   /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
   5808      feature for ease of use and backwards compatibility.  */
   5809   p = *str;
   5810   if (strncasecmp (p, "SPSR", 4) == 0)
   5811     {
   5812       if (m_profile)
   5813 	goto unsupported_psr;
   5814 
   5815       psr_field = SPSR_BIT;
   5816     }
   5817   else if (strncasecmp (p, "CPSR", 4) == 0)
   5818     {
   5819       if (m_profile)
   5820 	goto unsupported_psr;
   5821 
   5822       psr_field = 0;
   5823     }
   5824   else if (strncasecmp (p, "APSR", 4) == 0)
   5825     {
   5826       /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
   5827 	 and ARMv7-R architecture CPUs.  */
   5828       is_apsr = TRUE;
   5829       psr_field = 0;
   5830     }
   5831   else if (m_profile)
   5832     {
   5833       start = p;
   5834       do
   5835 	p++;
   5836       while (ISALNUM (*p) || *p == '_');
   5837 
   5838       if (strncasecmp (start, "iapsr", 5) == 0
   5839 	  || strncasecmp (start, "eapsr", 5) == 0
   5840 	  || strncasecmp (start, "xpsr", 4) == 0
   5841 	  || strncasecmp (start, "psr", 3) == 0)
   5842 	p = start + strcspn (start, "rR") + 1;
   5843 
   5844       psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
   5845 						  p - start);
   5846 
   5847       if (!psr)
   5848 	return FAIL;
   5849 
   5850       /* If APSR is being written, a bitfield may be specified.  Note that
   5851 	 APSR itself is handled above.  */
   5852       if (psr->field <= 3)
   5853 	{
   5854 	  psr_field = psr->field;
   5855 	  is_apsr = TRUE;
   5856 	  goto check_suffix;
   5857 	}
   5858 
   5859       *str = p;
   5860       /* M-profile MSR instructions have the mask field set to "10", except
   5861 	 *PSR variants which modify APSR, which may use a different mask (and
   5862 	 have been handled already).  Do that by setting the PSR_f field
   5863 	 here.  */
   5864       return psr->field | (lhs ? PSR_f : 0);
   5865     }
   5866   else
   5867     goto unsupported_psr;
   5868 
   5869   p += 4;
   5870 check_suffix:
   5871   if (*p == '_')
   5872     {
   5873       /* A suffix follows.  */
   5874       p++;
   5875       start = p;
   5876 
   5877       do
   5878 	p++;
   5879       while (ISALNUM (*p) || *p == '_');
   5880 
   5881       if (is_apsr)
   5882 	{
   5883 	  /* APSR uses a notation for bits, rather than fields.  */
   5884 	  unsigned int nzcvq_bits = 0;
   5885 	  unsigned int g_bit = 0;
   5886 	  char *bit;
   5887 
   5888 	  for (bit = start; bit != p; bit++)
   5889 	    {
   5890 	      switch (TOLOWER (*bit))
   5891 		{
   5892 		case 'n':
   5893 		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
   5894 		  break;
   5895 
   5896 		case 'z':
   5897 		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
   5898 		  break;
   5899 
   5900 		case 'c':
   5901 		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
   5902 		  break;
   5903 
   5904 		case 'v':
   5905 		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
   5906 		  break;
   5907 
   5908 		case 'q':
   5909 		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
   5910 		  break;
   5911 
   5912 		case 'g':
   5913 		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
   5914 		  break;
   5915 
   5916 		default:
   5917 		  inst.error = _("unexpected bit specified after APSR");
   5918 		  return FAIL;
   5919 		}
   5920 	    }
   5921 
   5922 	  if (nzcvq_bits == 0x1f)
   5923 	    psr_field |= PSR_f;
   5924 
   5925 	  if (g_bit == 0x1)
   5926 	    {
   5927 	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
   5928 		{
   5929 		  inst.error = _("selected processor does not "
   5930 				 "support DSP extension");
   5931 		  return FAIL;
   5932 		}
   5933 
   5934 	      psr_field |= PSR_s;
   5935 	    }
   5936 
   5937 	  if ((nzcvq_bits & 0x20) != 0
   5938 	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
   5939 	      || (g_bit & 0x2) != 0)
   5940 	    {
   5941 	      inst.error = _("bad bitmask specified after APSR");
   5942 	      return FAIL;
   5943 	    }
   5944 	}
   5945       else
   5946 	{
   5947 	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
   5948 						      p - start);
   5949 	  if (!psr)
   5950 	    goto error;
   5951 
   5952 	  psr_field |= psr->field;
   5953 	}
   5954     }
   5955   else
   5956     {
   5957       if (ISALNUM (*p))
   5958 	goto error;    /* Garbage after "[CS]PSR".  */
   5959 
   5960       /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
   5961 	 is deprecated, but allow it anyway.  */
   5962       if (is_apsr && lhs)
   5963 	{
   5964 	  psr_field |= PSR_f;
   5965 	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
   5966 		       "deprecated"));
   5967 	}
   5968       else if (!m_profile)
   5969 	/* These bits are never right for M-profile devices: don't set them
   5970 	   (only code paths which read/write APSR reach here).  */
   5971 	psr_field |= (PSR_c | PSR_f);
   5972     }
   5973   *str = p;
   5974   return psr_field;
   5975 
   5976  unsupported_psr:
   5977   inst.error = _("selected processor does not support requested special "
   5978 		 "purpose register");
   5979   return FAIL;
   5980 
   5981  error:
   5982   inst.error = _("flag for {c}psr instruction expected");
   5983   return FAIL;
   5984 }
   5985 
   5986 /* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   5987    value suitable for splatting into the AIF field of the instruction.	*/
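        /* For example, the "if" in "cpsie if" yields 0x2 | 0x1 = 0x3 and "aif"
           yields 0x7; an empty flag string is rejected below.  */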
   5988 
   5989 static int
   5990 parse_cps_flags (char **str)
   5991 {
   5992   int val = 0;
   5993   int saw_a_flag = 0;
   5994   char *s = *str;
   5995 
   5996   for (;;)
   5997     switch (*s++)
   5998       {
   5999       case '\0': case ',':
   6000 	goto done;
   6001 
   6002       case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
   6003       case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
   6004       case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
   6005 
   6006       default:
   6007 	inst.error = _("unrecognized CPS flag");
   6008 	return FAIL;
   6009       }
   6010 
   6011  done:
   6012   if (saw_a_flag == 0)
   6013     {
   6014       inst.error = _("missing CPS flags");
   6015       return FAIL;
   6016     }
   6017 
   6018   *str = s - 1;
   6019   return val;
   6020 }
   6021 
   6022 /* Parse an endian specifier ("BE" or "LE", case insensitive);
   6023    returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
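        /* For example, this parses the "be" or "le" operand of a SETEND
           instruction.  */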
   6024 
   6025 static int
   6026 parse_endian_specifier (char **str)
   6027 {
   6028   int little_endian;
   6029   char *s = *str;
   6030 
   6031   if (strncasecmp (s, "BE", 2))
   6032     little_endian = 0;
   6033   else if (strncasecmp (s, "LE", 2))
   6034     little_endian = 1;
   6035   else
   6036     {
   6037       inst.error = _("valid endian specifiers are be or le");
   6038       return FAIL;
   6039     }
   6040 
   6041   if (ISALNUM (s[2]) || s[2] == '_')
   6042     {
   6043       inst.error = _("valid endian specifiers are be or le");
   6044       return FAIL;
   6045     }
   6046 
   6047   *str = s + 2;
   6048   return little_endian;
   6049 }
   6050 
   6051 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a
   6052    value suitable for poking into the rotate field of an sxt or sxta
   6053    instruction, or FAIL on error.  */
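        /* For example, the "ROR #16" in "sxtab r0, r1, r2, ROR #16"
           yields 0x2.  */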
   6054 
   6055 static int
   6056 parse_ror (char **str)
   6057 {
   6058   int rot;
   6059   char *s = *str;
   6060 
   6061   if (strncasecmp (s, "ROR", 3) == 0)
   6062     s += 3;
   6063   else
   6064     {
   6065       inst.error = _("missing rotation field after comma");
   6066       return FAIL;
   6067     }
   6068 
   6069   if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
   6070     return FAIL;
   6071 
   6072   switch (rot)
   6073     {
   6074     case  0: *str = s; return 0x0;
   6075     case  8: *str = s; return 0x1;
   6076     case 16: *str = s; return 0x2;
   6077     case 24: *str = s; return 0x3;
   6078 
   6079     default:
   6080       inst.error = _("rotation can only be 0, 8, 16, or 24");
   6081       return FAIL;
   6082     }
   6083 }
   6084 
   6085 /* Parse a conditional code (from conds[] below).  The value returned is in the
   6086    range 0 .. 14, or FAIL.  */
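        /* For example, "eq" and "al" are accepted; in the standard ARM
           condition-code encoding these are 0 and 14 respectively.  */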
   6087 static int
   6088 parse_cond (char **str)
   6089 {
   6090   char *q;
   6091   const struct asm_cond *c;
   6092   int n;
   6093   /* Condition codes are always 2 characters, so matching up to
   6094      3 characters is sufficient.  */
   6095   char cond[3];
   6096 
   6097   q = *str;
   6098   n = 0;
   6099   while (ISALPHA (*q) && n < 3)
   6100     {
   6101       cond[n] = TOLOWER (*q);
   6102       q++;
   6103       n++;
   6104     }
   6105 
   6106   c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
   6107   if (!c)
   6108     {
   6109       inst.error = _("condition required");
   6110       return FAIL;
   6111     }
   6112 
   6113   *str = q;
   6114   return c->value;
   6115 }
   6116 
   6117 /* Record a use of the given feature.  */
   6118 static void
   6119 record_feature_use (const arm_feature_set *feature)
   6120 {
   6121   if (thumb_mode)
   6122     ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
   6123   else
   6124     ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
   6125 }
   6126 
   6127 /* If the given feature is available in the selected CPU, mark it as used.
   6128    Returns TRUE iff feature is available.  */
   6129 static bfd_boolean
   6130 mark_feature_used (const arm_feature_set *feature)
   6131 {
   6132   /* Ensure the option is valid on the current architecture.  */
   6133   if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
   6134     return FALSE;
   6135 
   6136   /* Add the appropriate architecture feature for the barrier option used.
   6137      */
   6138   record_feature_use (feature);
   6139 
   6140   return TRUE;
   6141 }
   6142 
   6143 /* Parse an option for a barrier instruction.  Returns the encoding for the
   6144    option, or FAIL.  */
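        /* Illustrative operands: the "ish" in "dmb ish" or the "sy" in
           "dsb sy"; unrecognised names simply return FAIL so the caller can
           try an immediate instead (see po_barrier_or_imm below).  */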
   6145 static int
   6146 parse_barrier (char **str)
   6147 {
   6148   char *p, *q;
   6149   const struct asm_barrier_opt *o;
   6150 
   6151   p = q = *str;
   6152   while (ISALPHA (*q))
   6153     q++;
   6154 
   6155   o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
   6156 						    q - p);
   6157   if (!o)
   6158     return FAIL;
   6159 
   6160   if (!mark_feature_used (&o->arch))
   6161     return FAIL;
   6162 
   6163   *str = q;
   6164   return o->value;
   6165 }
   6166 
   6167 /* Parse the operands of a table branch instruction.  Similar to a memory
   6168    operand.  */
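        /* For example: "tbb [r0, r1]" and "tbh [r0, r1, lsl #1]"; the only
           shift accepted below is LSL #1.  */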
   6169 static int
   6170 parse_tb (char **str)
   6171 {
   6172   char * p = *str;
   6173   int reg;
   6174 
   6175   if (skip_past_char (&p, '[') == FAIL)
   6176     {
   6177       inst.error = _("'[' expected");
   6178       return FAIL;
   6179     }
   6180 
   6181   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   6182     {
   6183       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   6184       return FAIL;
   6185     }
   6186   inst.operands[0].reg = reg;
   6187 
   6188   if (skip_past_comma (&p) == FAIL)
   6189     {
   6190       inst.error = _("',' expected");
   6191       return FAIL;
   6192     }
   6193 
   6194   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
   6195     {
   6196       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
   6197       return FAIL;
   6198     }
   6199   inst.operands[0].imm = reg;
   6200 
   6201   if (skip_past_comma (&p) == SUCCESS)
   6202     {
   6203       if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
   6204 	return FAIL;
   6205       if (inst.reloc.exp.X_add_number != 1)
   6206 	{
   6207 	  inst.error = _("invalid shift");
   6208 	  return FAIL;
   6209 	}
   6210       inst.operands[0].shifted = 1;
   6211     }
   6212 
   6213   if (skip_past_char (&p, ']') == FAIL)
   6214     {
   6215       inst.error = _("']' expected");
   6216       return FAIL;
   6217     }
   6218   *str = p;
   6219   return SUCCESS;
   6220 }
   6221 
   6222 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   6223    information on the types the operands can take and how they are encoded.
   6224    Up to four operands may be read; this function handles setting the
   6225    ".present" field for each read operand itself.
   6226    Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   6227    else returns FAIL.  */
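        /* A few illustrative forms (the case numbers refer to the comments in
           the code below): "vmov.32 d0[1], r2" (case 4), "vmov r0, r1, d2"
           (case 7), "vmov.f32 s0, #1.0" (case 10), and "vmov q0, q1"
           (case 0).  */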
   6228 
   6229 static int
   6230 parse_neon_mov (char **str, int *which_operand)
   6231 {
   6232   int i = *which_operand, val;
   6233   enum arm_reg_type rtype;
   6234   char *ptr = *str;
   6235   struct neon_type_el optype;
   6236 
   6237   if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
   6238     {
   6239       /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
   6240       inst.operands[i].reg = val;
   6241       inst.operands[i].isscalar = 1;
   6242       inst.operands[i].vectype = optype;
   6243       inst.operands[i++].present = 1;
   6244 
   6245       if (skip_past_comma (&ptr) == FAIL)
   6246 	goto wanted_comma;
   6247 
   6248       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6249 	goto wanted_arm;
   6250 
   6251       inst.operands[i].reg = val;
   6252       inst.operands[i].isreg = 1;
   6253       inst.operands[i].present = 1;
   6254     }
   6255   else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
   6256 	   != FAIL)
   6257     {
   6258       /* Cases 0, 1, 2, 3, 5 (D only).  */
   6259       if (skip_past_comma (&ptr) == FAIL)
   6260 	goto wanted_comma;
   6261 
   6262       inst.operands[i].reg = val;
   6263       inst.operands[i].isreg = 1;
   6264       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
   6265       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6266       inst.operands[i].isvec = 1;
   6267       inst.operands[i].vectype = optype;
   6268       inst.operands[i++].present = 1;
   6269 
   6270       if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6271 	{
   6272 	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
   6273 	     Case 13: VMOV <Sd>, <Rm>  */
   6274 	  inst.operands[i].reg = val;
   6275 	  inst.operands[i].isreg = 1;
   6276 	  inst.operands[i].present = 1;
   6277 
   6278 	  if (rtype == REG_TYPE_NQ)
   6279 	    {
   6280 	      first_error (_("can't use Neon quad register here"));
   6281 	      return FAIL;
   6282 	    }
   6283 	  else if (rtype != REG_TYPE_VFS)
   6284 	    {
   6285 	      i++;
   6286 	      if (skip_past_comma (&ptr) == FAIL)
   6287 		goto wanted_comma;
   6288 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6289 		goto wanted_arm;
   6290 	      inst.operands[i].reg = val;
   6291 	      inst.operands[i].isreg = 1;
   6292 	      inst.operands[i].present = 1;
   6293 	    }
   6294 	}
   6295       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
   6296 					   &optype)) != FAIL)
   6297 	{
   6298 	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
   6299 	     Case 1: VMOV<c><q> <Dd>, <Dm>
   6300 	     Case 8: VMOV.F32 <Sd>, <Sm>
   6301 	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
   6302 
   6303 	  inst.operands[i].reg = val;
   6304 	  inst.operands[i].isreg = 1;
   6305 	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
   6306 	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6307 	  inst.operands[i].isvec = 1;
   6308 	  inst.operands[i].vectype = optype;
   6309 	  inst.operands[i].present = 1;
   6310 
   6311 	  if (skip_past_comma (&ptr) == SUCCESS)
   6312 	    {
   6313 	      /* Case 15.  */
   6314 	      i++;
   6315 
   6316 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6317 		goto wanted_arm;
   6318 
   6319 	      inst.operands[i].reg = val;
   6320 	      inst.operands[i].isreg = 1;
   6321 	      inst.operands[i++].present = 1;
   6322 
   6323 	      if (skip_past_comma (&ptr) == FAIL)
   6324 		goto wanted_comma;
   6325 
   6326 	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
   6327 		goto wanted_arm;
   6328 
   6329 	      inst.operands[i].reg = val;
   6330 	      inst.operands[i].isreg = 1;
   6331 	      inst.operands[i].present = 1;
   6332 	    }
   6333 	}
   6334       else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
   6335 	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
   6336 	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
   6337 	     Case 10: VMOV.F32 <Sd>, #<imm>
   6338 	     Case 11: VMOV.F64 <Dd>, #<imm>  */
   6339 	inst.operands[i].immisfloat = 1;
   6340       else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
   6341 	       == SUCCESS)
   6342 	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
   6343 	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
   6344 	;
   6345       else
   6346 	{
   6347 	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
   6348 	  return FAIL;
   6349 	}
   6350     }
   6351   else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6352     {
   6353       /* Cases 6, 7.  */
   6354       inst.operands[i].reg = val;
   6355       inst.operands[i].isreg = 1;
   6356       inst.operands[i++].present = 1;
   6357 
   6358       if (skip_past_comma (&ptr) == FAIL)
   6359 	goto wanted_comma;
   6360 
   6361       if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
   6362 	{
   6363 	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
   6364 	  inst.operands[i].reg = val;
   6365 	  inst.operands[i].isscalar = 1;
   6366 	  inst.operands[i].present = 1;
   6367 	  inst.operands[i].vectype = optype;
   6368 	}
   6369       else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
   6370 	{
   6371 	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
   6372 	  inst.operands[i].reg = val;
   6373 	  inst.operands[i].isreg = 1;
   6374 	  inst.operands[i++].present = 1;
   6375 
   6376 	  if (skip_past_comma (&ptr) == FAIL)
   6377 	    goto wanted_comma;
   6378 
   6379 	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
   6380 	      == FAIL)
   6381 	    {
   6382 	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
   6383 	      return FAIL;
   6384 	    }
   6385 
   6386 	  inst.operands[i].reg = val;
   6387 	  inst.operands[i].isreg = 1;
   6388 	  inst.operands[i].isvec = 1;
   6389 	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
   6390 	  inst.operands[i].vectype = optype;
   6391 	  inst.operands[i].present = 1;
   6392 
   6393 	  if (rtype == REG_TYPE_VFS)
   6394 	    {
   6395 	      /* Case 14.  */
   6396 	      i++;
   6397 	      if (skip_past_comma (&ptr) == FAIL)
   6398 		goto wanted_comma;
   6399 	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
   6400 					      &optype)) == FAIL)
   6401 		{
   6402 		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
   6403 		  return FAIL;
   6404 		}
   6405 	      inst.operands[i].reg = val;
   6406 	      inst.operands[i].isreg = 1;
   6407 	      inst.operands[i].isvec = 1;
   6408 	      inst.operands[i].issingle = 1;
   6409 	      inst.operands[i].vectype = optype;
   6410 	      inst.operands[i].present = 1;
   6411 	    }
   6412 	}
   6413       else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
   6414 	       != FAIL)
   6415 	{
   6416 	  /* Case 13.  */
   6417 	  inst.operands[i].reg = val;
   6418 	  inst.operands[i].isreg = 1;
   6419 	  inst.operands[i].isvec = 1;
   6420 	  inst.operands[i].issingle = 1;
   6421 	  inst.operands[i].vectype = optype;
   6422 	  inst.operands[i].present = 1;
   6423 	}
   6424     }
   6425   else
   6426     {
   6427       first_error (_("parse error"));
   6428       return FAIL;
   6429     }
   6430 
   6431   /* Successfully parsed the operands. Update args.  */
   6432   *which_operand = i;
   6433   *str = ptr;
   6434   return SUCCESS;
   6435 
   6436  wanted_comma:
   6437   first_error (_("expected comma"));
   6438   return FAIL;
   6439 
   6440  wanted_arm:
   6441   first_error (_(reg_expected_msgs[REG_TYPE_RN]));
   6442   return FAIL;
   6443 }
   6444 
   6445 /* Use this macro when the operand constraints are different
   6446    for ARM and THUMB (e.g. ldrd).  */
   6447 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
   6448 	((arm_operand) | ((thumb_operand) << 16))
   6449 
   6450 /* Matcher codes for parse_operands.  */
   6451 enum operand_parse_code
   6452 {
   6453   OP_stop,	/* end of line */
   6454 
   6455   OP_RR,	/* ARM register */
   6456   OP_RRnpc,	/* ARM register, not r15 */
   6457   OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
   6458   OP_RRnpcb,	/* ARM register, not r15, in square brackets */
   6459   OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
   6460 		   optional trailing ! */
   6461   OP_RRw,	/* ARM register, not r15, optional trailing ! */
   6462   OP_RCP,	/* Coprocessor number */
   6463   OP_RCN,	/* Coprocessor register */
   6464   OP_RF,	/* FPA register */
   6465   OP_RVS,	/* VFP single precision register */
   6466   OP_RVD,	/* VFP double precision register (0..15) */
   6467   OP_RND,       /* Neon double precision register (0..31) */
   6468   OP_RNQ,	/* Neon quad precision register */
   6469   OP_RVSD,	/* VFP single or double precision register */
   6470   OP_RNDQ,      /* Neon double or quad precision register */
   6471   OP_RNSDQ,	/* Neon single, double or quad precision register */
   6472   OP_RNSC,      /* Neon scalar D[X] */
   6473   OP_RVC,	/* VFP control register */
   6474   OP_RMF,	/* Maverick F register */
   6475   OP_RMD,	/* Maverick D register */
   6476   OP_RMFX,	/* Maverick FX register */
   6477   OP_RMDX,	/* Maverick DX register */
   6478   OP_RMAX,	/* Maverick AX register */
   6479   OP_RMDS,	/* Maverick DSPSC register */
   6480   OP_RIWR,	/* iWMMXt wR register */
   6481   OP_RIWC,	/* iWMMXt wC register */
   6482   OP_RIWG,	/* iWMMXt wCG register */
   6483   OP_RXA,	/* XScale accumulator register */
   6484 
   6485   OP_REGLST,	/* ARM register list */
   6486   OP_VRSLST,	/* VFP single-precision register list */
   6487   OP_VRDLST,	/* VFP double-precision register list */
   6488   OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
   6489   OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
   6490   OP_NSTRLST,   /* Neon element/structure list */
   6491 
   6492   OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
   6493   OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
   6494   OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
   6495   OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
   6496   OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
   6497   OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
   6498   OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
   6499   OP_VMOV,      /* Neon VMOV operands.  */
   6500   OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
   6501   OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
   6502   OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
   6503 
   6504   OP_I0,        /* immediate zero */
   6505   OP_I7,	/* immediate value 0 .. 7 */
   6506   OP_I15,	/*		   0 .. 15 */
   6507   OP_I16,	/*		   1 .. 16 */
   6508   OP_I16z,      /*                 0 .. 16 */
   6509   OP_I31,	/*		   0 .. 31 */
   6510   OP_I31w,	/*		   0 .. 31, optional trailing ! */
   6511   OP_I32,	/*		   1 .. 32 */
   6512   OP_I32z,	/*		   0 .. 32 */
   6513   OP_I63,	/*		   0 .. 63 */
   6514   OP_I63s,	/*		 -64 .. 63 */
   6515   OP_I64,	/*		   1 .. 64 */
   6516   OP_I64z,	/*		   0 .. 64 */
   6517   OP_I255,	/*		   0 .. 255 */
   6518 
   6519   OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
   6520   OP_I7b,	/*			       0 .. 7 */
   6521   OP_I15b,	/*			       0 .. 15 */
   6522   OP_I31b,	/*			       0 .. 31 */
   6523 
   6524   OP_SH,	/* shifter operand */
   6525   OP_SHG,	/* shifter operand with possible group relocation */
   6526   OP_ADDR,	/* Memory address expression (any mode) */
   6527   OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
   6528   OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
   6529   OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
   6530   OP_EXP,	/* arbitrary expression */
   6531   OP_EXPi,	/* same, with optional immediate prefix */
   6532   OP_EXPr,	/* same, with optional relocation suffix */
   6533   OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
   6534 
   6535   OP_CPSF,	/* CPS flags */
   6536   OP_ENDI,	/* Endianness specifier */
   6537   OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
   6538   OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
   6539   OP_COND,	/* conditional code */
   6540   OP_TB,	/* Table branch.  */
   6541 
   6542   OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
   6543 
   6544   OP_RRnpc_I0,	/* ARM register or literal 0 */
   6545   OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
   6546   OP_RR_EXi,	/* ARM register or expression with imm prefix */
   6547   OP_RF_IF,	/* FPA register or immediate */
   6548   OP_RIWR_RIWC, /* iWMMXt R or C reg */
   6549   OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
   6550 
   6551   /* Optional operands.	 */
   6552   OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
   6553   OP_oI31b,	 /*				0 .. 31 */
   6554   OP_oI32b,      /*                             1 .. 32 */
   6555   OP_oI32z,      /*                             0 .. 32 */
   6556   OP_oIffffb,	 /*				0 .. 65535 */
   6557   OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
   6558 
   6559   OP_oRR,	 /* ARM register */
   6560   OP_oRRnpc,	 /* ARM register, not the PC */
   6561   OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
   6562   OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
   6563   OP_oRND,       /* Optional Neon double precision register */
   6564   OP_oRNQ,       /* Optional Neon quad precision register */
   6565   OP_oRNDQ,      /* Optional Neon double or quad precision register */
   6566   OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
   6567   OP_oSHll,	 /* LSL immediate */
   6568   OP_oSHar,	 /* ASR immediate */
   6569   OP_oSHllar,	 /* LSL or ASR immediate */
   6570   OP_oROR,	 /* ROR 0/8/16/24 */
   6571   OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
   6572 
   6573   /* Some pre-defined mixed (ARM/THUMB) operands.  */
   6574   OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
   6575   OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
   6576   OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
   6577 
   6578   OP_FIRST_OPTIONAL = OP_oI7b
   6579 };
   6580 
   6581 /* Generic instruction operand parser.	This does no encoding and no
   6582    semantic validation; it merely squirrels values away in the inst
   6583    structure.  Returns SUCCESS or FAIL depending on whether the
   6584    specified grammar matched.  */
   6585 static int
   6586 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
   6587 {
   6588   unsigned const int *upat = pattern;
   6589   char *backtrack_pos = 0;
   6590   const char *backtrack_error = 0;
   6591   int i, val = 0, backtrack_index = 0;
   6592   enum arm_reg_type rtype;
   6593   parse_operand_result result;
   6594   unsigned int op_parse_code;
   6595 
   6596 #define po_char_or_fail(chr)			\
   6597   do						\
   6598     {						\
   6599       if (skip_past_char (&str, chr) == FAIL)	\
   6600 	goto bad_args;				\
   6601     }						\
   6602   while (0)
   6603 
   6604 #define po_reg_or_fail(regtype)					\
   6605   do								\
   6606     {								\
   6607       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
   6608 				 & inst.operands[i].vectype);	\
   6609       if (val == FAIL)						\
   6610 	{							\
   6611 	  first_error (_(reg_expected_msgs[regtype]));		\
   6612 	  goto failure;						\
   6613 	}							\
   6614       inst.operands[i].reg = val;				\
   6615       inst.operands[i].isreg = 1;				\
   6616       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
   6617       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
   6618       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
   6619 			     || rtype == REG_TYPE_VFD		\
   6620 			     || rtype == REG_TYPE_NQ);		\
   6621     }								\
   6622   while (0)
   6623 
   6624 #define po_reg_or_goto(regtype, label)				\
   6625   do								\
   6626     {								\
   6627       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
   6628 				 & inst.operands[i].vectype);	\
   6629       if (val == FAIL)						\
   6630 	goto label;						\
   6631 								\
   6632       inst.operands[i].reg = val;				\
   6633       inst.operands[i].isreg = 1;				\
   6634       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
   6635       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
   6636       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
   6637 			     || rtype == REG_TYPE_VFD		\
   6638 			     || rtype == REG_TYPE_NQ);		\
   6639     }								\
   6640   while (0)
   6641 
   6642 #define po_imm_or_fail(min, max, popt)				\
   6643   do								\
   6644     {								\
   6645       if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
   6646 	goto failure;						\
   6647       inst.operands[i].imm = val;				\
   6648     }								\
   6649   while (0)
   6650 
   6651 #define po_scalar_or_goto(elsz, label)					\
   6652   do									\
   6653     {									\
   6654       val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
   6655       if (val == FAIL)							\
   6656 	goto label;							\
   6657       inst.operands[i].reg = val;					\
   6658       inst.operands[i].isscalar = 1;					\
   6659     }									\
   6660   while (0)
   6661 
   6662 #define po_misc_or_fail(expr)			\
   6663   do						\
   6664     {						\
   6665       if (expr)					\
   6666 	goto failure;				\
   6667     }						\
   6668   while (0)
   6669 
   6670 #define po_misc_or_fail_no_backtrack(expr)		\
   6671   do							\
   6672     {							\
   6673       result = expr;					\
   6674       if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
   6675 	backtrack_pos = 0;				\
   6676       if (result != PARSE_OPERAND_SUCCESS)		\
   6677 	goto failure;					\
   6678     }							\
   6679   while (0)
   6680 
   6681 #define po_barrier_or_imm(str)				   \
   6682   do							   \
   6683     {						 	   \
   6684       val = parse_barrier (&str);			   \
   6685       if (val == FAIL && ! ISALPHA (*str))		   \
   6686 	goto immediate;					   \
   6687       if (val == FAIL					   \
   6688 	  /* ISB can only take SY as an option.  */	   \
   6689 	  || ((inst.instruction & 0xf0) == 0x60		   \
   6690 	       && val != 0xf))				   \
   6691 	{						   \
   6692 	   inst.error = _("invalid barrier type");	   \
   6693 	   backtrack_pos = 0;				   \
   6694 	   goto failure;				   \
   6695 	}						   \
   6696     }							   \
   6697   while (0)
   6698 
   6699   skip_whitespace (str);
   6700 
   6701   for (i = 0; upat[i] != OP_stop; i++)
   6702     {
   6703       op_parse_code = upat[i];
   6704       if (op_parse_code >= 1<<16)
   6705 	op_parse_code = thumb ? (op_parse_code >> 16)
   6706 				: (op_parse_code & ((1<<16)-1));
   6707 
   6708       if (op_parse_code >= OP_FIRST_OPTIONAL)
   6709 	{
   6710 	  /* Remember where we are in case we need to backtrack.  */
   6711 	  gas_assert (!backtrack_pos);
   6712 	  backtrack_pos = str;
   6713 	  backtrack_error = inst.error;
   6714 	  backtrack_index = i;
   6715 	}
   6716 
   6717       if (i > 0 && (i > 1 || inst.operands[0].present))
   6718 	po_char_or_fail (',');
   6719 
   6720       switch (op_parse_code)
   6721 	{
   6722 	  /* Registers */
   6723 	case OP_oRRnpc:
   6724 	case OP_oRRnpcsp:
   6725 	case OP_RRnpc:
   6726 	case OP_RRnpcsp:
   6727 	case OP_oRR:
   6728 	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
   6729 	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
   6730 	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
   6731 	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
   6732 	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
   6733 	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
   6734 	case OP_oRND:
   6735 	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
   6736 	case OP_RVC:
   6737 	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
   6738 	  break;
   6739 	  /* Also accept generic coprocessor regs for unknown registers.  */
   6740 	  coproc_reg:
   6741 	  po_reg_or_fail (REG_TYPE_CN);
   6742 	  break;
   6743 	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
   6744 	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
   6745 	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
   6746 	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
   6747 	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
   6748 	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
   6749 	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
   6750 	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
   6751 	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
   6752 	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
   6753 	case OP_oRNQ:
   6754 	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
   6755 	case OP_oRNDQ:
   6756 	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
   6757 	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
   6758 	case OP_oRNSDQ:
   6759 	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
   6760 
   6761 	/* Neon scalar. Using an element size of 8 means that some invalid
   6762 	   scalars are accepted here, so deal with those in later code.  */
   6763 	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
   6764 
   6765 	case OP_RNDQ_I0:
   6766 	  {
   6767 	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
   6768 	    break;
   6769 	    try_imm0:
   6770 	    po_imm_or_fail (0, 0, TRUE);
   6771 	  }
   6772 	  break;
   6773 
   6774 	case OP_RVSD_I0:
   6775 	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
   6776 	  break;
   6777 
   6778 	case OP_RSVD_FI0:
   6779 	  {
   6780 	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
   6781 	    break;
   6782 	    try_ifimm0:
   6783 	    if (parse_ifimm_zero (&str))
   6784 	      inst.operands[i].imm = 0;
   6785 	    else
   6786 	    {
   6787 	      inst.error
   6788 	        = _("only floating point zero is allowed as immediate value");
   6789 	      goto failure;
   6790 	    }
   6791 	  }
   6792 	  break;
   6793 
   6794 	case OP_RR_RNSC:
   6795 	  {
   6796 	    po_scalar_or_goto (8, try_rr);
   6797 	    break;
   6798 	    try_rr:
   6799 	    po_reg_or_fail (REG_TYPE_RN);
   6800 	  }
   6801 	  break;
   6802 
   6803 	case OP_RNSDQ_RNSC:
   6804 	  {
   6805 	    po_scalar_or_goto (8, try_nsdq);
   6806 	    break;
   6807 	    try_nsdq:
   6808 	    po_reg_or_fail (REG_TYPE_NSDQ);
   6809 	  }
   6810 	  break;
   6811 
   6812 	case OP_RNDQ_RNSC:
   6813 	  {
   6814 	    po_scalar_or_goto (8, try_ndq);
   6815 	    break;
   6816 	    try_ndq:
   6817 	    po_reg_or_fail (REG_TYPE_NDQ);
   6818 	  }
   6819 	  break;
   6820 
   6821 	case OP_RND_RNSC:
   6822 	  {
   6823 	    po_scalar_or_goto (8, try_vfd);
   6824 	    break;
   6825 	    try_vfd:
   6826 	    po_reg_or_fail (REG_TYPE_VFD);
   6827 	  }
   6828 	  break;
   6829 
   6830 	case OP_VMOV:
   6831 	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
   6832 	     not careful then bad things might happen.  */
   6833 	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
   6834 	  break;
   6835 
   6836 	case OP_RNDQ_Ibig:
   6837 	  {
   6838 	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
   6839 	    break;
   6840 	    try_immbig:
   6841 	    /* There's a possibility of getting a 64-bit immediate here, so
   6842 	       we need special handling.  */
   6843 	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
   6844 		== FAIL)
   6845 	      {
   6846 		inst.error = _("immediate value is out of range");
   6847 		goto failure;
   6848 	      }
   6849 	  }
   6850 	  break;
   6851 
   6852 	case OP_RNDQ_I63b:
   6853 	  {
   6854 	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
   6855 	    break;
   6856 	    try_shimm:
   6857 	    po_imm_or_fail (0, 63, TRUE);
   6858 	  }
   6859 	  break;
   6860 
   6861 	case OP_RRnpcb:
   6862 	  po_char_or_fail ('[');
   6863 	  po_reg_or_fail  (REG_TYPE_RN);
   6864 	  po_char_or_fail (']');
   6865 	  break;
   6866 
   6867 	case OP_RRnpctw:
   6868 	case OP_RRw:
   6869 	case OP_oRRw:
   6870 	  po_reg_or_fail (REG_TYPE_RN);
   6871 	  if (skip_past_char (&str, '!') == SUCCESS)
   6872 	    inst.operands[i].writeback = 1;
   6873 	  break;
   6874 
   6875 	  /* Immediates */
   6876 	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
   6877 	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
   6878 	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
   6879 	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
   6880 	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
   6881 	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
   6882 	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
   6883 	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
   6884 	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
   6885 	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
   6886 	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
   6887 	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
   6888 
   6889 	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
   6890 	case OP_oI7b:
   6891 	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
   6892 	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
   6893 	case OP_oI31b:
   6894 	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
   6895 	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
   6896 	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
   6897 	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
   6898 
   6899 	  /* Immediate variants */
   6900 	case OP_oI255c:
   6901 	  po_char_or_fail ('{');
   6902 	  po_imm_or_fail (0, 255, TRUE);
   6903 	  po_char_or_fail ('}');
   6904 	  break;
   6905 
   6906 	case OP_I31w:
   6907 	  /* The expression parser chokes on a trailing !, so we have
   6908 	     to find it first and zap it.  */
   6909 	  {
   6910 	    char *s = str;
   6911 	    while (*s && *s != ',')
   6912 	      s++;
   6913 	    if (s[-1] == '!')
   6914 	      {
   6915 		s[-1] = '\0';
   6916 		inst.operands[i].writeback = 1;
   6917 	      }
   6918 	    po_imm_or_fail (0, 31, TRUE);
   6919 	    if (str == s - 1)
   6920 	      str = s;
   6921 	  }
   6922 	  break;
   6923 
   6924 	  /* Expressions */
   6925 	case OP_EXPi:	EXPi:
   6926 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6927 					      GE_OPT_PREFIX));
   6928 	  break;
   6929 
   6930 	case OP_EXP:
   6931 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6932 					      GE_NO_PREFIX));
   6933 	  break;
   6934 
   6935 	case OP_EXPr:	EXPr:
   6936 	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
   6937 					      GE_NO_PREFIX));
   6938 	  if (inst.reloc.exp.X_op == O_symbol)
   6939 	    {
   6940 	      val = parse_reloc (&str);
   6941 	      if (val == -1)
   6942 		{
   6943 		  inst.error = _("unrecognized relocation suffix");
   6944 		  goto failure;
   6945 		}
   6946 	      else if (val != BFD_RELOC_UNUSED)
   6947 		{
   6948 		  inst.operands[i].imm = val;
   6949 		  inst.operands[i].hasreloc = 1;
   6950 		}
   6951 	    }
   6952 	  break;
   6953 
   6954 	  /* Operand for MOVW or MOVT.  */
   6955 	case OP_HALF:
   6956 	  po_misc_or_fail (parse_half (&str));
   6957 	  break;
   6958 
   6959 	  /* Register or expression.  */
   6960 	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
   6961 	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
   6962 
   6963 	  /* Register or immediate.  */
   6964 	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
   6965 	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
   6966 
   6967 	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
   6968 	IF:
   6969 	  if (!is_immediate_prefix (*str))
   6970 	    goto bad_args;
   6971 	  str++;
   6972 	  val = parse_fpa_immediate (&str);
   6973 	  if (val == FAIL)
   6974 	    goto failure;
   6975 	  /* FPA immediates are encoded as registers 8-15.
   6976 	     parse_fpa_immediate has already applied the offset.  */
   6977 	  inst.operands[i].reg = val;
   6978 	  inst.operands[i].isreg = 1;
   6979 	  break;
   6980 
   6981 	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
   6982 	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
   6983 
   6984 	  /* Two kinds of register.  */
   6985 	case OP_RIWR_RIWC:
   6986 	  {
   6987 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
   6988 	    if (!rege
   6989 		|| (rege->type != REG_TYPE_MMXWR
   6990 		    && rege->type != REG_TYPE_MMXWC
   6991 		    && rege->type != REG_TYPE_MMXWCG))
   6992 	      {
   6993 		inst.error = _("iWMMXt data or control register expected");
   6994 		goto failure;
   6995 	      }
   6996 	    inst.operands[i].reg = rege->number;
   6997 	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
   6998 	  }
   6999 	  break;
   7000 
   7001 	case OP_RIWC_RIWG:
   7002 	  {
   7003 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
   7004 	    if (!rege
   7005 		|| (rege->type != REG_TYPE_MMXWC
   7006 		    && rege->type != REG_TYPE_MMXWCG))
   7007 	      {
   7008 		inst.error = _("iWMMXt control register expected");
   7009 		goto failure;
   7010 	      }
   7011 	    inst.operands[i].reg = rege->number;
   7012 	    inst.operands[i].isreg = 1;
   7013 	  }
   7014 	  break;
   7015 
   7016 	  /* Misc */
   7017 	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
   7018 	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
   7019 	case OP_oROR:	 val = parse_ror (&str);		break;
   7020 	case OP_COND:	 val = parse_cond (&str);		break;
   7021 	case OP_oBARRIER_I15:
   7022 	  po_barrier_or_imm (str); break;
   7023 	  immediate:
   7024 	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
   7025 	    goto failure;
   7026 	  break;
   7027 
   7028 	case OP_wPSR:
   7029 	case OP_rPSR:
   7030 	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
   7031 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
   7032 	    {
   7033 	      inst.error = _("Banked registers are not available with this "
   7034 			     "architecture.");
   7035 	      goto failure;
   7036 	    }
   7037 	  break;
   7038 	  try_psr:
   7039 	  val = parse_psr (&str, op_parse_code == OP_wPSR);
   7040 	  break;
   7041 
   7042 	case OP_APSR_RR:
   7043 	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
   7044 	  break;
   7045 	  try_apsr:
   7046 	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
   7047 	     instruction).  */
   7048 	  if (strncasecmp (str, "APSR_", 5) == 0)
   7049 	    {
   7050 	      unsigned found = 0;
   7051 	      str += 5;
   7052 	      while (found < 15)
   7053 		switch (*str++)
   7054 		  {
   7055 		  case 'c': found = (found & 1) ? 16 : found | 1; break;
   7056 		  case 'n': found = (found & 2) ? 16 : found | 2; break;
   7057 		  case 'z': found = (found & 4) ? 16 : found | 4; break;
   7058 		  case 'v': found = (found & 8) ? 16 : found | 8; break;
   7059 		  default: found = 16;
   7060 		  }
   7061 	      if (found != 15)
   7062 		goto failure;
   7063 	      inst.operands[i].isvec = 1;
   7064 	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
   7065 	      inst.operands[i].reg = REG_PC;
   7066 	    }
   7067 	  else
   7068 	    goto failure;
   7069 	  break;
   7070 
   7071 	case OP_TB:
   7072 	  po_misc_or_fail (parse_tb (&str));
   7073 	  break;
   7074 
   7075 	  /* Register lists.  */
   7076 	case OP_REGLST:
   7077 	  val = parse_reg_list (&str);
   7078 	  if (*str == '^')
   7079 	    {
   7080 	      inst.operands[i].writeback = 1;
   7081 	      str++;
   7082 	    }
   7083 	  break;
   7084 
   7085 	case OP_VRSLST:
   7086 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
   7087 	  break;
   7088 
   7089 	case OP_VRDLST:
   7090 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
   7091 	  break;
   7092 
   7093 	case OP_VRSDLST:
   7094 	  /* Allow Q registers too.  */
   7095 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7096 				    REGLIST_NEON_D);
   7097 	  if (val == FAIL)
   7098 	    {
   7099 	      inst.error = NULL;
   7100 	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7101 					REGLIST_VFP_S);
   7102 	      inst.operands[i].issingle = 1;
   7103 	    }
   7104 	  break;
   7105 
   7106 	case OP_NRDLST:
   7107 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
   7108 				    REGLIST_NEON_D);
   7109 	  break;
   7110 
   7111 	case OP_NSTRLST:
   7112 	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
   7113 					   &inst.operands[i].vectype);
   7114 	  break;
   7115 
   7116 	  /* Addressing modes */
   7117 	case OP_ADDR:
   7118 	  po_misc_or_fail (parse_address (&str, i));
   7119 	  break;
   7120 
   7121 	case OP_ADDRGLDR:
   7122 	  po_misc_or_fail_no_backtrack (
   7123 	    parse_address_group_reloc (&str, i, GROUP_LDR));
   7124 	  break;
   7125 
   7126 	case OP_ADDRGLDRS:
   7127 	  po_misc_or_fail_no_backtrack (
   7128 	    parse_address_group_reloc (&str, i, GROUP_LDRS));
   7129 	  break;
   7130 
   7131 	case OP_ADDRGLDC:
   7132 	  po_misc_or_fail_no_backtrack (
   7133 	    parse_address_group_reloc (&str, i, GROUP_LDC));
   7134 	  break;
   7135 
   7136 	case OP_SH:
   7137 	  po_misc_or_fail (parse_shifter_operand (&str, i));
   7138 	  break;
   7139 
   7140 	case OP_SHG:
   7141 	  po_misc_or_fail_no_backtrack (
   7142 	    parse_shifter_operand_group_reloc (&str, i));
   7143 	  break;
   7144 
   7145 	case OP_oSHll:
   7146 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
   7147 	  break;
   7148 
   7149 	case OP_oSHar:
   7150 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
   7151 	  break;
   7152 
   7153 	case OP_oSHllar:
   7154 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
   7155 	  break;
   7156 
   7157 	default:
   7158 	  as_fatal (_("unhandled operand code %d"), op_parse_code);
   7159 	}
   7160 
   7161       /* Various value-based sanity checks and shared operations.  We
   7162 	 do not signal immediate failures for the register constraints;
   7163 	 this allows a syntax error to take precedence.	 */
   7164       switch (op_parse_code)
   7165 	{
   7166 	case OP_oRRnpc:
   7167 	case OP_RRnpc:
   7168 	case OP_RRnpcb:
   7169 	case OP_RRw:
   7170 	case OP_oRRw:
   7171 	case OP_RRnpc_I0:
   7172 	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
   7173 	    inst.error = BAD_PC;
   7174 	  break;
   7175 
   7176 	case OP_oRRnpcsp:
   7177 	case OP_RRnpcsp:
   7178 	  if (inst.operands[i].isreg)
   7179 	    {
   7180 	      if (inst.operands[i].reg == REG_PC)
   7181 		inst.error = BAD_PC;
   7182 	      else if (inst.operands[i].reg == REG_SP)
   7183 		inst.error = BAD_SP;
   7184 	    }
   7185 	  break;
   7186 
   7187 	case OP_RRnpctw:
   7188 	  if (inst.operands[i].isreg
   7189 	      && inst.operands[i].reg == REG_PC
   7190 	      && (inst.operands[i].writeback || thumb))
   7191 	    inst.error = BAD_PC;
   7192 	  break;
   7193 
   7194 	case OP_CPSF:
   7195 	case OP_ENDI:
   7196 	case OP_oROR:
   7197 	case OP_wPSR:
   7198 	case OP_rPSR:
   7199 	case OP_COND:
   7200 	case OP_oBARRIER_I15:
   7201 	case OP_REGLST:
   7202 	case OP_VRSLST:
   7203 	case OP_VRDLST:
   7204 	case OP_VRSDLST:
   7205 	case OP_NRDLST:
   7206 	case OP_NSTRLST:
   7207 	  if (val == FAIL)
   7208 	    goto failure;
   7209 	  inst.operands[i].imm = val;
   7210 	  break;
   7211 
   7212 	default:
   7213 	  break;
   7214 	}
   7215 
   7216       /* If we get here, this operand was successfully parsed.	*/
   7217       inst.operands[i].present = 1;
   7218       continue;
   7219 
   7220     bad_args:
   7221       inst.error = BAD_ARGS;
   7222 
   7223     failure:
   7224       if (!backtrack_pos)
   7225 	{
   7226 	  /* The parse routine should already have set inst.error, but set a
   7227 	     default here just in case.  */
   7228 	  if (!inst.error)
   7229 	    inst.error = _("syntax error");
   7230 	  return FAIL;
   7231 	}
   7232 
   7233       /* Do not backtrack over a trailing optional argument that
   7234 	 absorbed some text.  We will only fail again, with the
   7235 	 'garbage following instruction' error message, which is
   7236 	 probably less helpful than the current one.  */
   7237       if (backtrack_index == i && backtrack_pos != str
   7238 	  && upat[i+1] == OP_stop)
   7239 	{
   7240 	  if (!inst.error)
   7241 	    inst.error = _("syntax error");
   7242 	  return FAIL;
   7243 	}
   7244 
   7245       /* Try again, skipping the optional argument at backtrack_pos.  */
   7246       str = backtrack_pos;
   7247       inst.error = backtrack_error;
   7248       inst.operands[backtrack_index].present = 0;
   7249       i = backtrack_index;
   7250       backtrack_pos = 0;
   7251     }
   7252 
   7253   /* Check that we have parsed all the arguments.  */
   7254   if (*str != '\0' && !inst.error)
   7255     inst.error = _("garbage following instruction");
   7256 
   7257   return inst.error ? FAIL : SUCCESS;
   7258 }
   7259 
   7260 #undef po_char_or_fail
   7261 #undef po_reg_or_fail
   7262 #undef po_reg_or_goto
   7263 #undef po_imm_or_fail
   7264 #undef po_scalar_or_goto
   7265 #undef po_barrier_or_imm
   7266 
   7267 /* Shorthand macro for instruction encoding functions issuing errors.  */
   7268 #define constraint(expr, err)			\
   7269   do						\
   7270     {						\
   7271       if (expr)					\
   7272 	{					\
   7273 	  inst.error = err;			\
   7274 	  return;				\
   7275 	}					\
   7276     }						\
   7277   while (0)
   7278 
   7279 /* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   7280    instructions are unpredictable if these registers are used.  This
   7281    is the BadReg predicate in ARM's Thumb-2 documentation.  */
   7282 #define reject_bad_reg(reg)				\
   7283   do							\
   7284    if (reg == REG_SP || reg == REG_PC)			\
   7285      {							\
   7286        inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
   7287        return;						\
   7288      }							\
   7289   while (0)
   7290 
   7291 /* If REG is R13 (the stack pointer), warn that its use is
   7292    deprecated.  */
   7293 #define warn_deprecated_sp(reg)			\
   7294   do						\
   7295     if (warn_on_deprecated && reg == REG_SP)	\
   7296        as_tsktsk (_("use of r13 is deprecated"));	\
   7297   while (0)
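
        /* Typical use in the encoders below (illustration only):
             constraint (inst.operands[0].reg == REG_PC, BAD_PC);
             reject_bad_reg (inst.operands[1].reg);  */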
   7298 
   7299 /* Functions for operand encoding.  ARM, then Thumb.  */
   7300 
   7301 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
   7302 
   7303 /* If the current inst is a scalar ARMv8.2 fp16 instruction, do special encoding.
   7304 
   7305    The only binary encoding difference is the Coprocessor number.  Coprocessor
   7306    9 is used for half-precision calculations or conversions.  The format of the
   7307    instruction is the same as the equivalent Coprocessor 10 instruction that
   7308    exists for Single-Precision operation.  */
   7309 
   7310 static void
   7311 do_scalar_fp16_v82_encode (void)
   7312 {
   7313   if (inst.cond != COND_ALWAYS)
   7314     as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
   7315 	       " the behaviour is UNPREDICTABLE"));
   7316   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
   7317 	      _(BAD_FP16));
   7318 
   7319   inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
   7320   mark_feature_used (&arm_ext_fp16);
   7321 }
   7322 
   7323 /* If VAL can be encoded in the immediate field of an ARM instruction,
   7324    return the encoded form.  Otherwise, return FAIL.  */
   7325 
   7326 static unsigned int
   7327 encode_arm_immediate (unsigned int val)
   7328 {
   7329   unsigned int a, i;
   7330 
   7331   if (val <= 0xff)
   7332     return val;
   7333 
   7334   for (i = 2; i < 32; i += 2)
   7335     if ((a = rotate_left (val, i)) <= 0xff)
   7336       return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
   7337 
   7338   return FAIL;
   7339 }
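
        /* As an illustration: 0x3f0000 cannot be encoded directly, but
           rotating it left by 16 gives 0x3f, so the function returns
           0x3f | (16 << 7) == 0x83f, i.e. imm8 = 0x3f with a rotate field
           of 8 (the CPU rotates right by 2 * 8 = 16 bits to recover the
           value).  A value such as 0x12345678 has no such encoding and
           yields FAIL.  */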
   7340 
   7341 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   7342    return the encoded form.  Otherwise, return FAIL.  */
   7343 static unsigned int
   7344 encode_thumb32_immediate (unsigned int val)
   7345 {
   7346   unsigned int a, i;
   7347 
   7348   if (val <= 0xff)
   7349     return val;
   7350 
   7351   for (i = 1; i <= 24; i++)
   7352     {
   7353       a = val >> i;
   7354       if ((val & ~(0xff << i)) == 0)
   7355 	return ((val >> i) & 0x7f) | ((32 - i) << 7);
   7356     }
   7357 
   7358   a = val & 0xff;
   7359   if (val == ((a << 16) | a))
   7360     return 0x100 | a;
   7361   if (val == ((a << 24) | (a << 16) | (a << 8) | a))
   7362     return 0x300 | a;
   7363 
   7364   a = val & 0xff00;
   7365   if (val == ((a << 16) | a))
   7366     return 0x200 | (a >> 8);
   7367 
   7368   return FAIL;
   7369 }
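
        /* For example (illustrative values): 0xab is returned unchanged;
           0x00ab00ab matches the half-word-replicated pattern and encodes
           as 0x1ab; 0xabababab encodes as 0x3ab; 0xab00ab00 encodes as
           0x2ab.  A value that fits none of the patterns, e.g. 0x12345678,
           yields FAIL.  */
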
   7370 /* Encode a VFP SP or DP register number into inst.instruction.  */
   7371 
   7372 static void
   7373 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
   7374 {
   7375   if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
   7376       && reg > 15)
   7377     {
   7378       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
   7379 	{
   7380 	  if (thumb_mode)
   7381 	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   7382 				    fpu_vfp_ext_d32);
   7383 	  else
   7384 	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
   7385 				    fpu_vfp_ext_d32);
   7386 	}
   7387       else
   7388 	{
   7389 	  first_error (_("D register out of range for selected VFP version"));
   7390 	  return;
   7391 	}
   7392     }
   7393 
   7394   switch (pos)
   7395     {
   7396     case VFP_REG_Sd:
   7397       inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
   7398       break;
   7399 
   7400     case VFP_REG_Sn:
   7401       inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
   7402       break;
   7403 
   7404     case VFP_REG_Sm:
   7405       inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
   7406       break;
   7407 
   7408     case VFP_REG_Dd:
   7409       inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
   7410       break;
   7411 
   7412     case VFP_REG_Dn:
   7413       inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
   7414       break;
   7415 
   7416     case VFP_REG_Dm:
   7417       inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
   7418       break;
   7419 
   7420     default:
   7421       abort ();
   7422     }
   7423 }
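
        /* For example, d17 in the Dd position splits into (17 & 15) == 1
           in bits 15:12 and (17 >> 4) == 1 in bit 22 (the D extension
           bit); s3 in the Sd position becomes (3 >> 1) == 1 in bits 15:12
           with the low bit in bit 22.  Illustrative registers only.  */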
   7424 
   7425 /* Encode a <shift> in an ARM-format instruction.  The immediate,
   7426    if any, is handled by md_apply_fix.	 */
   7427 static void
   7428 encode_arm_shift (int i)
   7429 {
   7430   if (inst.operands[i].shift_kind == SHIFT_RRX)
   7431     inst.instruction |= SHIFT_ROR << 5;
   7432   else
   7433     {
   7434       inst.instruction |= inst.operands[i].shift_kind << 5;
   7435       if (inst.operands[i].immisreg)
   7436 	{
   7437 	  inst.instruction |= SHIFT_BY_REG;
   7438 	  inst.instruction |= inst.operands[i].imm << 8;
   7439 	}
   7440       else
   7441 	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   7442     }
   7443 }
   7444 
   7445 static void
   7446 encode_arm_shifter_operand (int i)
   7447 {
   7448   if (inst.operands[i].isreg)
   7449     {
   7450       inst.instruction |= inst.operands[i].reg;
   7451       encode_arm_shift (i);
   7452     }
   7453   else
   7454     {
   7455       inst.instruction |= INST_IMMEDIATE;
   7456       if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
   7457 	inst.instruction |= inst.operands[i].imm;
   7458     }
   7459 }
   7460 
   7461 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
   7462 static void
   7463 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
   7464 {
   7465   /* PR 14260:
   7466      Generate an error if the operand is not a register.  */
   7467   constraint (!inst.operands[i].isreg,
   7468 	      _("Instruction does not support =N addresses"));
   7469 
   7470   inst.instruction |= inst.operands[i].reg << 16;
   7471 
   7472   if (inst.operands[i].preind)
   7473     {
   7474       if (is_t)
   7475 	{
   7476 	  inst.error = _("instruction does not accept preindexed addressing");
   7477 	  return;
   7478 	}
   7479       inst.instruction |= PRE_INDEX;
   7480       if (inst.operands[i].writeback)
   7481 	inst.instruction |= WRITE_BACK;
   7482 
   7483     }
   7484   else if (inst.operands[i].postind)
   7485     {
   7486       gas_assert (inst.operands[i].writeback);
   7487       if (is_t)
   7488 	inst.instruction |= WRITE_BACK;
   7489     }
   7490   else /* unindexed - only for coprocessor */
   7491     {
   7492       inst.error = _("instruction does not accept unindexed addressing");
   7493       return;
   7494     }
   7495 
   7496   if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
   7497       && (((inst.instruction & 0x000f0000) >> 16)
   7498 	  == ((inst.instruction & 0x0000f000) >> 12)))
   7499     as_warn ((inst.instruction & LOAD_BIT)
   7500 	     ? _("destination register same as write-back base")
   7501 	     : _("source register same as write-back base"));
   7502 }
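
        /* For instance, "ldr r0, [r0, #4]!" places r0 in both the base
           (bits 19:16) and destination (bits 15:12) fields with write-back
           set, so the check above emits "destination register same as
           write-back base".  Example for illustration only.  */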
   7503 
   7504 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7505    ARM-format mode 2 load or store instruction.	 If is_t is true,
   7506    reject forms that cannot be used with a T instruction (i.e. not
   7507    post-indexed).  */
   7508 static void
   7509 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
   7510 {
   7511   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
   7512 
   7513   encode_arm_addr_mode_common (i, is_t);
   7514 
   7515   if (inst.operands[i].immisreg)
   7516     {
   7517       constraint ((inst.operands[i].imm == REG_PC
   7518 		   || (is_pc && inst.operands[i].writeback)),
   7519 		  BAD_PC_ADDRESSING);
   7520       inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
   7521       inst.instruction |= inst.operands[i].imm;
   7522       if (!inst.operands[i].negative)
   7523 	inst.instruction |= INDEX_UP;
   7524       if (inst.operands[i].shifted)
   7525 	{
   7526 	  if (inst.operands[i].shift_kind == SHIFT_RRX)
   7527 	    inst.instruction |= SHIFT_ROR << 5;
   7528 	  else
   7529 	    {
   7530 	      inst.instruction |= inst.operands[i].shift_kind << 5;
   7531 	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   7532 	    }
   7533 	}
   7534     }
   7535   else /* immediate offset in inst.reloc */
   7536     {
   7537       if (is_pc && !inst.reloc.pc_rel)
   7538 	{
   7539 	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
   7540 
   7541 	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
   7542 	     cannot use PC in addressing.
   7543 	     PC cannot be used in writeback addressing, either.  */
   7544 	  constraint ((is_t || inst.operands[i].writeback),
   7545 		      BAD_PC_ADDRESSING);
   7546 
   7547 	  /* Use of PC in str is deprecated for ARMv7.  */
   7548 	  if (warn_on_deprecated
   7549 	      && !is_load
   7550 	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
   7551 	    as_tsktsk (_("use of PC in this instruction is deprecated"));
   7552 	}
   7553 
   7554       if (inst.reloc.type == BFD_RELOC_UNUSED)
   7555 	{
   7556 	  /* Prefer + for zero encoded value.  */
   7557 	  if (!inst.operands[i].negative)
   7558 	    inst.instruction |= INDEX_UP;
   7559 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
   7560 	}
   7561     }
   7562 }
   7563 
   7564 /* inst.operands[i] was set up by parse_address.  Encode it into an
   7565    ARM-format mode 3 load or store instruction.	 Reject forms that
   7566    cannot be used with such instructions.  If is_t is true, reject
   7567    forms that cannot be used with a T instruction (i.e. not
   7568    post-indexed).  */
   7569 static void
   7570 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
   7571 {
   7572   if (inst.operands[i].immisreg && inst.operands[i].shifted)
   7573     {
   7574       inst.error = _("instruction does not accept scaled register index");
   7575       return;
   7576     }
   7577 
   7578   encode_arm_addr_mode_common (i, is_t);
   7579 
   7580   if (inst.operands[i].immisreg)
   7581     {
   7582       constraint ((inst.operands[i].imm == REG_PC
   7583 		   || (is_t && inst.operands[i].reg == REG_PC)),
   7584 		  BAD_PC_ADDRESSING);
   7585       constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
   7586 		  BAD_PC_WRITEBACK);
   7587       inst.instruction |= inst.operands[i].imm;
   7588       if (!inst.operands[i].negative)
   7589 	inst.instruction |= INDEX_UP;
   7590     }
   7591   else /* immediate offset in inst.reloc */
   7592     {
   7593       constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
   7594 		   && inst.operands[i].writeback),
   7595 		  BAD_PC_WRITEBACK);
   7596       inst.instruction |= HWOFFSET_IMM;
   7597       if (inst.reloc.type == BFD_RELOC_UNUSED)
   7598 	{
   7599 	  /* Prefer + for zero encoded value.  */
   7600 	  if (!inst.operands[i].negative)
   7601 	    inst.instruction |= INDEX_UP;
   7602 
   7603 	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
   7604 	}
   7605     }
   7606 }
   7607 
   7608 /* Write immediate bits [7:0] to the following locations:
   7609 
   7610   |28/24|23     19|18 16|15                    4|3     0|
   7611   |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
   7612 
   7613   This function is used by VMOV/VMVN/VORR/VBIC.  */
   7614 
   7615 static void
   7616 neon_write_immbits (unsigned immbits)
   7617 {
   7618   inst.instruction |= immbits & 0xf;
   7619   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
   7620   inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
   7621 }
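
        /* E.g. immbits == 0xa5 (0b10100101) scatters as efgh = 0b0101 into
           bits 3:0, bcd = 0b010 into bits 18:16 and a = 1 into bit 24
           (ARM) or bit 28 (Thumb).  Illustrative value only.  */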
   7622 
   7623 /* Invert low-order SIZE bits of XHI:XLO.  */
   7624 
   7625 static void
   7626 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
   7627 {
   7628   unsigned immlo = xlo ? *xlo : 0;
   7629   unsigned immhi = xhi ? *xhi : 0;
   7630 
   7631   switch (size)
   7632     {
   7633     case 8:
   7634       immlo = (~immlo) & 0xff;
   7635       break;
   7636 
   7637     case 16:
   7638       immlo = (~immlo) & 0xffff;
   7639       break;
   7640 
   7641     case 64:
   7642       immhi = (~immhi) & 0xffffffff;
   7643       /* fall through.  */
   7644 
   7645     case 32:
   7646       immlo = (~immlo) & 0xffffffff;
   7647       break;
   7648 
   7649     default:
   7650       abort ();
   7651     }
   7652 
   7653   if (xlo)
   7654     *xlo = immlo;
   7655 
   7656   if (xhi)
   7657     *xhi = immhi;
   7658 }
   7659 
   7660 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   7661    A, B, C, D.  */
   7662 
   7663 static int
   7664 neon_bits_same_in_bytes (unsigned imm)
   7665 {
   7666   return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
   7667 	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
   7668 	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
   7669 	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
   7670 }
   7671 
   7672 /* For immediate of above form, return 0bABCD.  */
   7673 
   7674 static unsigned
   7675 neon_squash_bits (unsigned imm)
   7676 {
   7677   return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
   7678 	 | ((imm & 0x01000000) >> 21);
   7679 }
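
        /* For example, 0x00ff00ff has every byte equal to 0x00 or 0xff, so
           neon_bits_same_in_bytes accepts it and neon_squash_bits returns
           0b0101 (one bit per byte, most significant byte first).
           Illustrative value only.  */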
   7680 
   7681 /* Compress quarter-float representation to 0b...000 abcdefgh.  */
   7682 
   7683 static unsigned
   7684 neon_qfloat_bits (unsigned imm)
   7685 {
   7686   return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
   7687 }
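
        /* Sketch: the single-precision pattern for 1.0 (0x3f800000)
           compresses to ((0x3f800000 >> 19) & 0x7f) | 0 == 0x70, the imm8
           used by "vmov.f32 s0, #1.0"; -2.0 (0xc0000000) compresses to
           0x80.  Illustrative values only.  */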
   7688 
   7689 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   7690    the instruction. *OP is passed as the initial value of the op field, and
   7691    may be set to a different value depending on the constant (i.e.
   7692    "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   7693    MVN).  If the immediate looks like a repeated pattern then also
   7694    try smaller element sizes.  */
   7695 
   7696 static int
   7697 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
   7698 			 unsigned *immbits, int *op, int size,
   7699 			 enum neon_el_type type)
   7700 {
   7701   /* Only permit float immediates (including 0.0/-0.0) if the operand type is
   7702      float.  */
   7703   if (type == NT_float && !float_p)
   7704     return FAIL;
   7705 
   7706   if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
   7707     {
   7708       if (size != 32 || *op == 1)
   7709 	return FAIL;
   7710       *immbits = neon_qfloat_bits (immlo);
   7711       return 0xf;
   7712     }
   7713 
   7714   if (size == 64)
   7715     {
   7716       if (neon_bits_same_in_bytes (immhi)
   7717 	  && neon_bits_same_in_bytes (immlo))
   7718 	{
   7719 	  if (*op == 1)
   7720 	    return FAIL;
   7721 	  *immbits = (neon_squash_bits (immhi) << 4)
   7722 		     | neon_squash_bits (immlo);
   7723 	  *op = 1;
   7724 	  return 0xe;
   7725 	}
   7726 
   7727       if (immhi != immlo)
   7728 	return FAIL;
   7729     }
   7730 
   7731   if (size >= 32)
   7732     {
   7733       if (immlo == (immlo & 0x000000ff))
   7734 	{
   7735 	  *immbits = immlo;
   7736 	  return 0x0;
   7737 	}
   7738       else if (immlo == (immlo & 0x0000ff00))
   7739 	{
   7740 	  *immbits = immlo >> 8;
   7741 	  return 0x2;
   7742 	}
   7743       else if (immlo == (immlo & 0x00ff0000))
   7744 	{
   7745 	  *immbits = immlo >> 16;
   7746 	  return 0x4;
   7747 	}
   7748       else if (immlo == (immlo & 0xff000000))
   7749 	{
   7750 	  *immbits = immlo >> 24;
   7751 	  return 0x6;
   7752 	}
   7753       else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
   7754 	{
   7755 	  *immbits = (immlo >> 8) & 0xff;
   7756 	  return 0xc;
   7757 	}
   7758       else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
   7759 	{
   7760 	  *immbits = (immlo >> 16) & 0xff;
   7761 	  return 0xd;
   7762 	}
   7763 
   7764       if ((immlo & 0xffff) != (immlo >> 16))
   7765 	return FAIL;
   7766       immlo &= 0xffff;
   7767     }
   7768 
   7769   if (size >= 16)
   7770     {
   7771       if (immlo == (immlo & 0x000000ff))
   7772 	{
   7773 	  *immbits = immlo;
   7774 	  return 0x8;
   7775 	}
   7776       else if (immlo == (immlo & 0x0000ff00))
   7777 	{
   7778 	  *immbits = immlo >> 8;
   7779 	  return 0xa;
   7780 	}
   7781 
   7782       if ((immlo & 0xff) != (immlo >> 8))
   7783 	return FAIL;
   7784       immlo &= 0xff;
   7785     }
   7786 
   7787   if (immlo == (immlo & 0x000000ff))
   7788     {
   7789       /* Don't allow MVN with 8-bit immediate.  */
   7790       if (*op == 1)
   7791 	return FAIL;
   7792       *immbits = immlo;
   7793       return 0xe;
   7794     }
   7795 
   7796   return FAIL;
   7797 }
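
        /* A worked example, assuming SIZE == 32 and *OP == 0: immlo ==
           0x00ab0000 matches the (immlo & 0x00ff0000) case above, so
           *IMMBITS becomes 0xab and the return value is cmode 0x4
           (constant == imm8 << 16).  Illustrative values only.  */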
   7798 
   7799 #if defined BFD_HOST_64_BIT
   7800 /* Returns TRUE if double precision value V may be cast
   7801    to single precision without loss of accuracy.  */
   7802 
   7803 static bfd_boolean
   7804 is_double_a_single (bfd_int64_t v)
   7805 {
   7806   int exp = (int)((v >> 52) & 0x7FF);
   7807   bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
   7808 
   7809   return (exp == 0 || exp == 0x7FF
   7810 	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
   7811     && (mantissa & 0x1FFFFFFFL) == 0;
   7812 }
   7813 
   7814 /* Returns a double precision value casted to single precision
   7815    (ignoring the least significant bits in exponent and mantissa).  */
   7816 
   7817 static int
   7818 double_to_single (bfd_int64_t v)
   7819 {
   7820   int sign = (int) ((v >> 63) & 1L);
   7821   int exp = (int) ((v >> 52) & 0x7FF);
   7822   bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
   7823 
   7824   if (exp == 0x7FF)
   7825     exp = 0xFF;
   7826   else
   7827     {
   7828       exp = exp - 1023 + 127;
   7829       if (exp >= 0xFF)
   7830 	{
   7831 	  /* Infinity.  */
   7832 	  exp = 0x7F;
   7833 	  mantissa = 0;
   7834 	}
   7835       else if (exp < 0)
   7836 	{
   7837 	  /* No denormalized numbers.  */
   7838 	  exp = 0;
   7839 	  mantissa = 0;
   7840 	}
   7841     }
   7842   mantissa >>= 29;
   7843   return (sign << 31) | (exp << 23) | mantissa;
   7844 }
   7845 #endif /* BFD_HOST_64_BIT */
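
        /* For instance, the double 1.0 (0x3ff0000000000000) passes
           is_double_a_single (exponent 1023 is in range and the low 29
           mantissa bits are zero) and double_to_single yields 0x3f800000,
           the single-precision 1.0.  Illustrative value only.  */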
   7846 
   7847 enum lit_type
   7848 {
   7849   CONST_THUMB,
   7850   CONST_ARM,
   7851   CONST_VEC
   7852 };
   7853 
   7854 static void do_vfp_nsyn_opcode (const char *);
   7855 
   7856 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
   7857    Determine whether it can be performed with a move instruction; if
   7858    it can, convert inst.instruction to that move instruction and
   7859    return TRUE; if it can't, convert inst.instruction to a literal-pool
   7860    load and return FALSE.  If this is not a valid thing to do in the
   7861    current context, set inst.error and return TRUE.
   7862 
   7863    inst.operands[i] describes the destination register.	 */
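
        /* For example (sketch, ARM state): "ldr r0, =0xff00" can become
           "mov r0, #0xff00" because encode_arm_immediate succeeds, whereas
           "ldr r0, =0x12345678" has no mov/mvn encoding and falls back to
           a literal-pool load of the form "ldr r0, [pc, #offset]".  */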
   7864 
   7865 static bfd_boolean
   7866 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
   7867 {
   7868   unsigned long tbit;
   7869   bfd_boolean thumb_p = (t == CONST_THUMB);
   7870   bfd_boolean arm_p   = (t == CONST_ARM);
   7871 
   7872   if (thumb_p)
   7873     tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
   7874   else
   7875     tbit = LOAD_BIT;
   7876 
   7877   if ((inst.instruction & tbit) == 0)
   7878     {
   7879       inst.error = _("invalid pseudo operation");
   7880       return TRUE;
   7881     }
   7882 
   7883   if (inst.reloc.exp.X_op != O_constant
   7884       && inst.reloc.exp.X_op != O_symbol
   7885       && inst.reloc.exp.X_op != O_big)
   7886     {
   7887       inst.error = _("constant expression expected");
   7888       return TRUE;
   7889     }
   7890 
   7891   if (inst.reloc.exp.X_op == O_constant
   7892       || inst.reloc.exp.X_op == O_big)
   7893     {
   7894 #if defined BFD_HOST_64_BIT
   7895       bfd_int64_t v;
   7896 #else
   7897       offsetT v;
   7898 #endif
   7899       if (inst.reloc.exp.X_op == O_big)
   7900 	{
   7901 	  LITTLENUM_TYPE w[X_PRECISION];
   7902 	  LITTLENUM_TYPE * l;
   7903 
   7904 	  if (inst.reloc.exp.X_add_number == -1)
   7905 	    {
   7906 	      gen_to_words (w, X_PRECISION, E_PRECISION);
   7907 	      l = w;
   7908 	      /* FIXME: Should we check words w[2..5] ?  */
   7909 	    }
   7910 	  else
   7911 	    l = generic_bignum;
   7912 
   7913 #if defined BFD_HOST_64_BIT
   7914 	  v =
   7915 	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
   7916 		  << LITTLENUM_NUMBER_OF_BITS)
   7917 		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
   7918 		<< LITTLENUM_NUMBER_OF_BITS)
   7919 	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
   7920 	      << LITTLENUM_NUMBER_OF_BITS)
   7921 	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
   7922 #else
   7923 	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
   7924 	    |  (l[0] & LITTLENUM_MASK);
   7925 #endif
   7926 	}
   7927       else
   7928 	v = inst.reloc.exp.X_add_number;
   7929 
   7930       if (!inst.operands[i].issingle)
   7931 	{
   7932 	  if (thumb_p)
   7933 	    {
   7934 	      /* LDR should not lead to a flag-setting instruction being
   7935 		 chosen, so we do not check whether MOVS can be used.  */
   7936 
   7937 	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
   7938 		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
   7939 		  && inst.operands[i].reg != 13
   7940 		  && inst.operands[i].reg != 15)
   7941 		{
   7942 		  /* Check if on thumb2 it can be done with a mov.w, mvn or
   7943 		     movw instruction.  */
   7944 		  unsigned int newimm;
   7945 		  bfd_boolean isNegated;
   7946 
   7947 		  newimm = encode_thumb32_immediate (v);
   7948 		  if (newimm != (unsigned int) FAIL)
   7949 		    isNegated = FALSE;
   7950 		  else
   7951 		    {
   7952 		      newimm = encode_thumb32_immediate (~v);
   7953 		      if (newimm != (unsigned int) FAIL)
   7954 			isNegated = TRUE;
   7955 		    }
   7956 
   7957 		  /* The number can be loaded with a mov.w or mvn
   7958 		     instruction.  */
   7959 		  if (newimm != (unsigned int) FAIL
   7960 		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
   7961 		    {
   7962 		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
   7963 					  | (inst.operands[i].reg << 8));
   7964 		      /* Change to MOVN.  */
   7965 		      inst.instruction |= (isNegated ? 0x200000 : 0);
   7966 		      inst.instruction |= (newimm & 0x800) << 15;
   7967 		      inst.instruction |= (newimm & 0x700) << 4;
   7968 		      inst.instruction |= (newimm & 0x0ff);
   7969 		      return TRUE;
   7970 		    }
   7971 		  /* The number can be loaded with a movw instruction.  */
   7972 		  else if ((v & ~0xFFFF) == 0
   7973 			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
   7974 		    {
   7975 		      int imm = v & 0xFFFF;
   7976 
   7977 		      inst.instruction = 0xf2400000;  /* MOVW.  */
   7978 		      inst.instruction |= (inst.operands[i].reg << 8);
   7979 		      inst.instruction |= (imm & 0xf000) << 4;
   7980 		      inst.instruction |= (imm & 0x0800) << 15;
   7981 		      inst.instruction |= (imm & 0x0700) << 4;
   7982 		      inst.instruction |= (imm & 0x00ff);
   7983 		      return TRUE;
   7984 		    }
   7985 		}
   7986 	    }
   7987 	  else if (arm_p)
   7988 	    {
   7989 	      int value = encode_arm_immediate (v);
   7990 
   7991 	      if (value != FAIL)
   7992 		{
   7993 		  /* This can be done with a mov instruction.  */
   7994 		  inst.instruction &= LITERAL_MASK;
   7995 		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
   7996 		  inst.instruction |= value & 0xfff;
   7997 		  return TRUE;
   7998 		}
   7999 
   8000 	      value = encode_arm_immediate (~ v);
   8001 	      if (value != FAIL)
   8002 		{
   8003 		  /* This can be done with a mvn instruction.  */
   8004 		  inst.instruction &= LITERAL_MASK;
   8005 		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
   8006 		  inst.instruction |= value & 0xfff;
   8007 		  return TRUE;
   8008 		}
   8009 	    }
   8010 	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
   8011 	    {
   8012 	      int op = 0;
   8013 	      unsigned immbits = 0;
   8014 	      unsigned immlo = inst.operands[1].imm;
   8015 	      unsigned immhi = inst.operands[1].regisimm
   8016 		? inst.operands[1].reg
   8017 		: inst.reloc.exp.X_unsigned
   8018 		? 0
   8019 		: ((bfd_int64_t)((int) immlo)) >> 32;
   8020 	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
   8021 						   &op, 64, NT_invtype);
   8022 
   8023 	      if (cmode == FAIL)
   8024 		{
   8025 		  neon_invert_size (&immlo, &immhi, 64);
   8026 		  op = !op;
   8027 		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
   8028 						   &op, 64, NT_invtype);
   8029 		}
   8030 
   8031 	      if (cmode != FAIL)
   8032 		{
   8033 		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
   8034 		    | (1 << 23)
   8035 		    | (cmode << 8)
   8036 		    | (op << 5)
   8037 		    | (1 << 4);
   8038 
   8039 		  /* Fill other bits in vmov encoding for both thumb and arm.  */
   8040 		  if (thumb_mode)
   8041 		    inst.instruction |= (0x7U << 29) | (0xF << 24);
   8042 		  else
   8043 		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
   8044 		  neon_write_immbits (immbits);
   8045 		  return TRUE;
   8046 		}
   8047 	    }
   8048 	}
   8049 
   8050       if (t == CONST_VEC)
   8051 	{
   8052 	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
   8053 	  if (inst.operands[i].issingle
   8054 	      && is_quarter_float (inst.operands[1].imm)
   8055 	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
   8056 	    {
   8057 	      inst.operands[1].imm =
   8058 		neon_qfloat_bits (v);
   8059 	      do_vfp_nsyn_opcode ("fconsts");
   8060 	      return TRUE;
   8061 	    }
   8062 
   8063 	  /* If our host does not support a 64-bit type then we cannot perform
   8064 	     the following optimization.  This means that there will be a
   8065 	     discrepancy between the output produced by an assembler built for
   8066 	     a 32-bit-only host and the output produced from a 64-bit host, but
   8067 	     this cannot be helped.  */
   8068 #if defined BFD_HOST_64_BIT
   8069 	  else if (!inst.operands[1].issingle
   8070 		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
   8071 	    {
   8072 	      if (is_double_a_single (v)
   8073 		  && is_quarter_float (double_to_single (v)))
   8074 		{
   8075 		  inst.operands[1].imm =
   8076 		    neon_qfloat_bits (double_to_single (v));
   8077 		  do_vfp_nsyn_opcode ("fconstd");
   8078 		  return TRUE;
   8079 		}
   8080 	    }
   8081 #endif
   8082 	}
   8083     }
   8084 
   8085   if (add_to_lit_pool ((!inst.operands[i].isvec
   8086 			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
   8087     return TRUE;
   8088 
   8089   inst.operands[1].reg = REG_PC;
   8090   inst.operands[1].isreg = 1;
   8091   inst.operands[1].preind = 1;
   8092   inst.reloc.pc_rel = 1;
   8093   inst.reloc.type = (thumb_p
   8094 		     ? BFD_RELOC_ARM_THUMB_OFFSET
   8095 		     : (mode_3
   8096 			? BFD_RELOC_ARM_HWLITERAL
   8097 			: BFD_RELOC_ARM_LITERAL));
   8098   return FALSE;
   8099 }
   8100 
   8101 /* inst.operands[i] was set up by parse_address.  Encode it into an
   8102    ARM-format instruction.  Reject all forms which cannot be encoded
   8103    into a coprocessor load/store instruction.  If wb_ok is false,
   8104    reject use of writeback; if unind_ok is false, reject use of
   8105    unindexed addressing.  If reloc_override is not 0, use it instead
   8106    of BFD_RELOC_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   8107    (in which case it is preserved).  */
   8108 
   8109 static int
   8110 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
   8111 {
   8112   if (!inst.operands[i].isreg)
   8113     {
   8114       /* PR 18256 */
   8115       if (! inst.operands[0].isvec)
   8116 	{
   8117 	  inst.error = _("invalid co-processor operand");
   8118 	  return FAIL;
   8119 	}
   8120       if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
   8121 	return SUCCESS;
   8122     }
   8123 
   8124   inst.instruction |= inst.operands[i].reg << 16;
   8125 
   8126   gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
   8127 
   8128   if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
   8129     {
   8130       gas_assert (!inst.operands[i].writeback);
   8131       if (!unind_ok)
   8132 	{
   8133 	  inst.error = _("instruction does not support unindexed addressing");
   8134 	  return FAIL;
   8135 	}
   8136       inst.instruction |= inst.operands[i].imm;
   8137       inst.instruction |= INDEX_UP;
   8138       return SUCCESS;
   8139     }
   8140 
   8141   if (inst.operands[i].preind)
   8142     inst.instruction |= PRE_INDEX;
   8143 
   8144   if (inst.operands[i].writeback)
   8145     {
   8146       if (inst.operands[i].reg == REG_PC)
   8147 	{
   8148 	  inst.error = _("pc may not be used with write-back");
   8149 	  return FAIL;
   8150 	}
   8151       if (!wb_ok)
   8152 	{
   8153 	  inst.error = _("instruction does not support writeback");
   8154 	  return FAIL;
   8155 	}
   8156       inst.instruction |= WRITE_BACK;
   8157     }
   8158 
   8159   if (reloc_override)
   8160     inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
   8161   else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
   8162 	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
   8163 	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
   8164     {
   8165       if (thumb_mode)
   8166 	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
   8167       else
   8168 	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
   8169     }
   8170 
   8171   /* Prefer + for zero encoded value.  */
   8172   if (!inst.operands[i].negative)
   8173     inst.instruction |= INDEX_UP;
   8174 
   8175   return SUCCESS;
   8176 }
   8177 
   8178 /* Functions for instruction encoding, sorted by sub-architecture.
   8179    First some generics; their names are taken from the conventional
   8180    bit positions for register arguments in ARM format instructions.  */
   8181 
   8182 static void
   8183 do_noargs (void)
   8184 {
   8185 }
   8186 
   8187 static void
   8188 do_rd (void)
   8189 {
   8190   inst.instruction |= inst.operands[0].reg << 12;
   8191 }
   8192 
   8193 static void
   8194 do_rn (void)
   8195 {
   8196   inst.instruction |= inst.operands[0].reg << 16;
   8197 }
   8198 
   8199 static void
   8200 do_rd_rm (void)
   8201 {
   8202   inst.instruction |= inst.operands[0].reg << 12;
   8203   inst.instruction |= inst.operands[1].reg;
   8204 }
   8205 
   8206 static void
   8207 do_rm_rn (void)
   8208 {
   8209   inst.instruction |= inst.operands[0].reg;
   8210   inst.instruction |= inst.operands[1].reg << 16;
   8211 }
   8212 
   8213 static void
   8214 do_rd_rn (void)
   8215 {
   8216   inst.instruction |= inst.operands[0].reg << 12;
   8217   inst.instruction |= inst.operands[1].reg << 16;
   8218 }
   8219 
   8220 static void
   8221 do_rn_rd (void)
   8222 {
   8223   inst.instruction |= inst.operands[0].reg << 16;
   8224   inst.instruction |= inst.operands[1].reg << 12;
   8225 }
   8226 
   8227 static void
   8228 do_tt (void)
   8229 {
   8230   inst.instruction |= inst.operands[0].reg << 8;
   8231   inst.instruction |= inst.operands[1].reg << 16;
   8232 }
   8233 
   8234 static bfd_boolean
   8235 check_obsolete (const arm_feature_set *feature, const char *msg)
   8236 {
   8237   if (ARM_CPU_IS_ANY (cpu_variant))
   8238     {
   8239       as_tsktsk ("%s", msg);
   8240       return TRUE;
   8241     }
   8242   else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
   8243     {
   8244       as_bad ("%s", msg);
   8245       return TRUE;
   8246     }
   8247 
   8248   return FALSE;
   8249 }
   8250 
   8251 static void
   8252 do_rd_rm_rn (void)
   8253 {
   8254   unsigned Rn = inst.operands[2].reg;
   8255   /* Enforce restrictions on SWP instruction.  */
   8256   if ((inst.instruction & 0x0fbfffff) == 0x01000090)
   8257     {
   8258       constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
   8259 		  _("Rn must not overlap other operands"));
   8260 
   8261       /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
   8262        */
   8263       if (!check_obsolete (&arm_ext_v8,
   8264 			   _("swp{b} use is obsoleted for ARMv8 and later"))
   8265 	  && warn_on_deprecated
   8266 	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
   8267 	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
   8268     }
   8269 
   8270   inst.instruction |= inst.operands[0].reg << 12;
   8271   inst.instruction |= inst.operands[1].reg;
   8272   inst.instruction |= Rn << 16;
   8273 }
   8274 
   8275 static void
   8276 do_rd_rn_rm (void)
   8277 {
   8278   inst.instruction |= inst.operands[0].reg << 12;
   8279   inst.instruction |= inst.operands[1].reg << 16;
   8280   inst.instruction |= inst.operands[2].reg;
   8281 }
   8282 
   8283 static void
   8284 do_rm_rd_rn (void)
   8285 {
   8286   constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
   8287   constraint (((inst.reloc.exp.X_op != O_constant
   8288 		&& inst.reloc.exp.X_op != O_illegal)
   8289 	       || inst.reloc.exp.X_add_number != 0),
   8290 	      BAD_ADDR_MODE);
   8291   inst.instruction |= inst.operands[0].reg;
   8292   inst.instruction |= inst.operands[1].reg << 12;
   8293   inst.instruction |= inst.operands[2].reg << 16;
   8294 }
   8295 
   8296 static void
   8297 do_imm0 (void)
   8298 {
   8299   inst.instruction |= inst.operands[0].imm;
   8300 }
   8301 
   8302 static void
   8303 do_rd_cpaddr (void)
   8304 {
   8305   inst.instruction |= inst.operands[0].reg << 12;
   8306   encode_arm_cp_address (1, TRUE, TRUE, 0);
   8307 }
   8308 
   8309 /* ARM instructions, in alphabetical order by function name (except
   8310    that wrapper functions appear immediately after the function they
   8311    wrap).  */
   8312 
   8313 /* This is a pseudo-op of the form "adr rd, label" to be converted
   8314    into a relative address of the form "add rd, pc, #label-.-8".  */
   8315 
   8316 static void
   8317 do_adr (void)
   8318 {
   8319   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
   8320 
   8321   /* Frag hacking will turn this into a sub instruction if the offset turns
   8322      out to be negative.  */
   8323   inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   8324   inst.reloc.pc_rel = 1;
   8325   inst.reloc.exp.X_add_number -= 8;
   8326 }
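
        /* Sketch: if the label resolves 16 bytes past the adr instruction,
           the offset after the -8 adjustment is 8, giving
           "add rd, pc, #8"; a negative offset is flipped to a sub by the
           frag hacking mentioned above.  Illustrative numbers only.  */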
   8327 
   8328 /* This is a pseudo-op of the form "adrl rd, label" to be converted
   8329    into a relative address of the form:
   8330    add rd, pc, #low(label-.-8)
   8331    add rd, rd, #high(label-.-8)  */
   8332 
   8333 static void
   8334 do_adrl (void)
   8335 {
   8336   inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
   8337 
   8338   /* Frag hacking will turn this into a sub instruction if the offset turns
   8339      out to be negative.  */
   8340   inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
   8341   inst.reloc.pc_rel	       = 1;
   8342   inst.size		       = INSN_SIZE * 2;
   8343   inst.reloc.exp.X_add_number -= 8;
   8344 }
   8345 
   8346 static void
   8347 do_arit (void)
   8348 {
   8349   constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   8350 	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   8351 	      THUMB1_RELOC_ONLY);
   8352   if (!inst.operands[1].present)
   8353     inst.operands[1].reg = inst.operands[0].reg;
   8354   inst.instruction |= inst.operands[0].reg << 12;
   8355   inst.instruction |= inst.operands[1].reg << 16;
   8356   encode_arm_shifter_operand (2);
   8357 }
   8358 
   8359 static void
   8360 do_barrier (void)
   8361 {
   8362   if (inst.operands[0].present)
   8363     inst.instruction |= inst.operands[0].imm;
   8364   else
   8365     inst.instruction |= 0xf;
   8366 }
   8367 
   8368 static void
   8369 do_bfc (void)
   8370 {
   8371   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
   8372   constraint (msb > 32, _("bit-field extends past end of register"));
   8373   /* The instruction encoding stores the LSB and MSB,
   8374      not the LSB and width.  */
   8375   inst.instruction |= inst.operands[0].reg << 12;
   8376   inst.instruction |= inst.operands[1].imm << 7;
   8377   inst.instruction |= (msb - 1) << 16;
   8378 }
   8379 
   8380 static void
   8381 do_bfi (void)
   8382 {
   8383   unsigned int msb;
   8384 
   8385   /* #0 in second position is alternative syntax for bfc, which is
   8386      the same instruction but with REG_PC in the Rm field.  */
   8387   if (!inst.operands[1].isreg)
   8388     inst.operands[1].reg = REG_PC;
   8389 
   8390   msb = inst.operands[2].imm + inst.operands[3].imm;
   8391   constraint (msb > 32, _("bit-field extends past end of register"));
   8392   /* The instruction encoding stores the LSB and MSB,
   8393      not the LSB and width.  */
   8394   inst.instruction |= inst.operands[0].reg << 12;
   8395   inst.instruction |= inst.operands[1].reg;
   8396   inst.instruction |= inst.operands[2].imm << 7;
   8397   inst.instruction |= (msb - 1) << 16;
   8398 }
   8399 
   8400 static void
   8401 do_bfx (void)
   8402 {
   8403   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
   8404 	      _("bit-field extends past end of register"));
   8405   inst.instruction |= inst.operands[0].reg << 12;
   8406   inst.instruction |= inst.operands[1].reg;
   8407   inst.instruction |= inst.operands[2].imm << 7;
   8408   inst.instruction |= (inst.operands[3].imm - 1) << 16;
   8409 }
   8410 
   8411 /* ARM V5 breakpoint instruction (argument parse)
   8412      BKPT <16 bit unsigned immediate>
   8413      Instruction is not conditional.
   8414 	The bit pattern given in insns[] has the COND_ALWAYS condition,
   8415 	and it is an error if the caller tried to override that.  */
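         /* Illustrative example: "bkpt 0xabcd" places the top twelve bits
            (0xabc) in bits 19:8 of the encoding and the low nibble (0xd)
            in bits 3:0.  */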
   8416 
   8417 static void
   8418 do_bkpt (void)
   8419 {
   8420   /* Top 12 of 16 bits to bits 19:8.  */
   8421   inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
   8422 
   8423   /* Bottom 4 of 16 bits to bits 3:0.  */
   8424   inst.instruction |= inst.operands[0].imm & 0xf;
   8425 }
   8426 
   8427 static void
   8428 encode_branch (int default_reloc)
   8429 {
   8430   if (inst.operands[0].hasreloc)
   8431     {
   8432       constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
   8433 		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
   8434 		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
   8435       inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
   8436 	? BFD_RELOC_ARM_PLT32
   8437 	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
   8438     }
   8439   else
   8440     inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
   8441   inst.reloc.pc_rel = 1;
   8442 }
   8443 
   8444 static void
   8445 do_branch (void)
   8446 {
   8447 #ifdef OBJ_ELF
   8448   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   8449     encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
   8450   else
   8451 #endif
   8452     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
   8453 }
   8454 
   8455 static void
   8456 do_bl (void)
   8457 {
   8458 #ifdef OBJ_ELF
   8459   if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
   8460     {
   8461       if (inst.cond == COND_ALWAYS)
   8462 	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
   8463       else
   8464 	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
   8465     }
   8466   else
   8467 #endif
   8468     encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
   8469 }
   8470 
   8471 /* ARM V5 branch-link-exchange instruction (argument parse)
   8472      BLX <target_addr>		ie BLX(1)
   8473      BLX{<condition>} <Rm>	ie BLX(2)
   8474    Unfortunately, there are two different opcodes for this mnemonic.
   8475    So, the insns[].value is not used, and the code here zaps values
   8476 	into inst.instruction.
   8477    Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
   8478 
   8479 static void
   8480 do_blx (void)
   8481 {
   8482   if (inst.operands[0].isreg)
   8483     {
   8484       /* Arg is a register; the opcode provided by insns[] is correct.
   8485 	 It is not illegal to do "blx pc", just useless.  */
   8486       if (inst.operands[0].reg == REG_PC)
   8487 	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
   8488 
   8489       inst.instruction |= inst.operands[0].reg;
   8490     }
   8491   else
   8492     {
   8493       /* Arg is an address; this instruction cannot be executed
   8494 	 conditionally, and the opcode must be adjusted.
   8495 	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
    8496 	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
   8497       constraint (inst.cond != COND_ALWAYS, BAD_COND);
   8498       inst.instruction = 0xfa000000;
   8499       encode_branch (BFD_RELOC_ARM_PCREL_BLX);
   8500     }
   8501 }
   8502 
   8503 static void
   8504 do_bx (void)
   8505 {
   8506   bfd_boolean want_reloc;
   8507 
   8508   if (inst.operands[0].reg == REG_PC)
   8509     as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
   8510 
   8511   inst.instruction |= inst.operands[0].reg;
    8512   /* Output R_ARM_V4BX relocations if this is an EABI object that
    8513      looks like it is for ARMv4t or earlier.  */
   8514   want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
   8515   if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    8516     want_reloc = TRUE;
   8517 
   8518 #ifdef OBJ_ELF
   8519   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
   8520 #endif
   8521     want_reloc = FALSE;
   8522 
   8523   if (want_reloc)
   8524     inst.reloc.type = BFD_RELOC_ARM_V4BX;
   8525 }
   8526 
   8527 
   8528 /* ARM v5TEJ.  Jump to Jazelle code.  */
   8529 
   8530 static void
   8531 do_bxj (void)
   8532 {
   8533   if (inst.operands[0].reg == REG_PC)
   8534     as_tsktsk (_("use of r15 in bxj is not really useful"));
   8535 
   8536   inst.instruction |= inst.operands[0].reg;
   8537 }
   8538 
   8539 /* Co-processor data operation:
   8540       CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   8541       CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
   8542 static void
   8543 do_cdp (void)
   8544 {
   8545   inst.instruction |= inst.operands[0].reg << 8;
   8546   inst.instruction |= inst.operands[1].imm << 20;
   8547   inst.instruction |= inst.operands[2].reg << 12;
   8548   inst.instruction |= inst.operands[3].reg << 16;
   8549   inst.instruction |= inst.operands[4].reg;
   8550   inst.instruction |= inst.operands[5].imm << 5;
   8551 }
   8552 
   8553 static void
   8554 do_cmp (void)
   8555 {
   8556   inst.instruction |= inst.operands[0].reg << 16;
   8557   encode_arm_shifter_operand (1);
   8558 }
   8559 
   8560 /* Transfer between coprocessor and ARM registers.
   8561    MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   8562    MRC2
   8563    MCR{cond}
   8564    MCR2
   8565 
   8566    No special properties.  */
   8567 
   8568 struct deprecated_coproc_regs_s
   8569 {
   8570   unsigned cp;
   8571   int opc1;
   8572   unsigned crn;
   8573   unsigned crm;
   8574   int opc2;
   8575   arm_feature_set deprecated;
   8576   arm_feature_set obsoleted;
   8577   const char *dep_msg;
   8578   const char *obs_msg;
   8579 };
   8580 
   8581 #define DEPR_ACCESS_V8 \
   8582   N_("This coprocessor register access is deprecated in ARMv8")
   8583 
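         /* For example (illustrative): "mcr p15, 0, r0, c7, c10, 5" (the CP15
            DMB operation) matches the first entry below, so do_co_reg flags it
            as deprecated when assembling for ARMv8 with warnings enabled.  */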
   8584 /* Table of all deprecated coprocessor registers.  */
   8585 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
   8586 {
   8587     {15, 0, 7, 10, 5,					/* CP15DMB.  */
   8588      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8589      DEPR_ACCESS_V8, NULL},
   8590     {15, 0, 7, 10, 4,					/* CP15DSB.  */
   8591      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8592      DEPR_ACCESS_V8, NULL},
   8593     {15, 0, 7,  5, 4,					/* CP15ISB.  */
   8594      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8595      DEPR_ACCESS_V8, NULL},
   8596     {14, 6, 1,  0, 0,					/* TEEHBR.  */
   8597      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8598      DEPR_ACCESS_V8, NULL},
   8599     {14, 6, 0,  0, 0,					/* TEECR.  */
   8600      ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   8601      DEPR_ACCESS_V8, NULL},
   8602 };
   8603 
   8604 #undef DEPR_ACCESS_V8
   8605 
   8606 static const size_t deprecated_coproc_reg_count =
   8607   sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
   8608 
   8609 static void
   8610 do_co_reg (void)
   8611 {
   8612   unsigned Rd;
   8613   size_t i;
   8614 
   8615   Rd = inst.operands[2].reg;
   8616   if (thumb_mode)
   8617     {
   8618       if (inst.instruction == 0xee000010
   8619 	  || inst.instruction == 0xfe000010)
   8620 	/* MCR, MCR2  */
   8621 	reject_bad_reg (Rd);
   8622       else
   8623 	/* MRC, MRC2  */
   8624 	constraint (Rd == REG_SP, BAD_SP);
   8625     }
   8626   else
   8627     {
   8628       /* MCR */
   8629       if (inst.instruction == 0xe000010)
   8630 	constraint (Rd == REG_PC, BAD_PC);
   8631     }
   8632 
    8633   for (i = 0; i < deprecated_coproc_reg_count; ++i)
    8634     {
    8635       const struct deprecated_coproc_regs_s *r =
    8636 	deprecated_coproc_regs + i;
    8637 
    8638       if (inst.operands[0].reg == r->cp
    8639 	  && inst.operands[1].imm == r->opc1
    8640 	  && inst.operands[3].reg == r->crn
    8641 	  && inst.operands[4].reg == r->crm
    8642 	  && inst.operands[5].imm == r->opc2)
    8643 	{
    8644 	  if (! ARM_CPU_IS_ANY (cpu_variant)
    8645 	      && warn_on_deprecated
    8646 	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
    8647 	    as_tsktsk ("%s", r->dep_msg);
    8648 	}
    8649     }
   8650 
   8651   inst.instruction |= inst.operands[0].reg << 8;
   8652   inst.instruction |= inst.operands[1].imm << 21;
   8653   inst.instruction |= Rd << 12;
   8654   inst.instruction |= inst.operands[3].reg << 16;
   8655   inst.instruction |= inst.operands[4].reg;
   8656   inst.instruction |= inst.operands[5].imm << 5;
   8657 }
   8658 
   8659 /* Transfer between coprocessor register and pair of ARM registers.
   8660    MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   8661    MCRR2
   8662    MRRC{cond}
   8663    MRRC2
   8664 
   8665    Two XScale instructions are special cases of these:
   8666 
   8667      MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
   8668      MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
   8669 
   8670    Result unpredictable if Rd or Rn is R15.  */
   8671 
   8672 static void
   8673 do_co_reg2c (void)
   8674 {
   8675   unsigned Rd, Rn;
   8676 
   8677   Rd = inst.operands[2].reg;
   8678   Rn = inst.operands[3].reg;
   8679 
   8680   if (thumb_mode)
   8681     {
   8682       reject_bad_reg (Rd);
   8683       reject_bad_reg (Rn);
   8684     }
   8685   else
   8686     {
   8687       constraint (Rd == REG_PC, BAD_PC);
   8688       constraint (Rn == REG_PC, BAD_PC);
   8689     }
   8690 
   8691   inst.instruction |= inst.operands[0].reg << 8;
   8692   inst.instruction |= inst.operands[1].imm << 4;
   8693   inst.instruction |= Rd << 12;
   8694   inst.instruction |= Rn << 16;
   8695   inst.instruction |= inst.operands[4].reg;
   8696 }
   8697 
   8698 static void
   8699 do_cpsi (void)
   8700 {
   8701   inst.instruction |= inst.operands[0].imm << 6;
   8702   if (inst.operands[1].present)
   8703     {
   8704       inst.instruction |= CPSI_MMOD;
   8705       inst.instruction |= inst.operands[1].imm;
   8706     }
   8707 }
   8708 
   8709 static void
   8710 do_dbg (void)
   8711 {
   8712   inst.instruction |= inst.operands[0].imm;
   8713 }
   8714 
   8715 static void
   8716 do_div (void)
   8717 {
   8718   unsigned Rd, Rn, Rm;
   8719 
   8720   Rd = inst.operands[0].reg;
   8721   Rn = (inst.operands[1].present
   8722 	? inst.operands[1].reg : Rd);
   8723   Rm = inst.operands[2].reg;
   8724 
   8725   constraint ((Rd == REG_PC), BAD_PC);
   8726   constraint ((Rn == REG_PC), BAD_PC);
   8727   constraint ((Rm == REG_PC), BAD_PC);
   8728 
   8729   inst.instruction |= Rd << 16;
   8730   inst.instruction |= Rn << 0;
   8731   inst.instruction |= Rm << 8;
   8732 }
   8733 
   8734 static void
   8735 do_it (void)
   8736 {
   8737   /* There is no IT instruction in ARM mode.  We
   8738      process it to do the validation as if in
   8739      thumb mode, just in case the code gets
   8740      assembled for thumb using the unified syntax.  */
   8741 
   8742   inst.size = 0;
   8743   if (unified_syntax)
   8744     {
   8745       set_it_insn_type (IT_INSN);
   8746       now_it.mask = (inst.instruction & 0xf) | 0x10;
   8747       now_it.cc = inst.operands[0].imm;
   8748     }
   8749 }
   8750 
   8751 /* If there is only one register in the register list,
   8752    then return its register number.  Otherwise return -1.  */
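         /* Illustrative example: a range of 0x0010 (just r4) yields 4, while
            0x0030 (r4 and r5) yields -1.  */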
   8753 static int
   8754 only_one_reg_in_list (int range)
   8755 {
   8756   int i = ffs (range) - 1;
   8757   return (i > 15 || range != (1 << i)) ? -1 : i;
   8758 }
   8759 
   8760 static void
   8761 encode_ldmstm(int from_push_pop_mnem)
   8762 {
   8763   int base_reg = inst.operands[0].reg;
   8764   int range = inst.operands[1].imm;
   8765   int one_reg;
   8766 
   8767   inst.instruction |= base_reg << 16;
   8768   inst.instruction |= range;
   8769 
   8770   if (inst.operands[1].writeback)
   8771     inst.instruction |= LDM_TYPE_2_OR_3;
   8772 
   8773   if (inst.operands[0].writeback)
   8774     {
   8775       inst.instruction |= WRITE_BACK;
   8776       /* Check for unpredictable uses of writeback.  */
   8777       if (inst.instruction & LOAD_BIT)
   8778 	{
   8779 	  /* Not allowed in LDM type 2.	 */
   8780 	  if ((inst.instruction & LDM_TYPE_2_OR_3)
   8781 	      && ((range & (1 << REG_PC)) == 0))
   8782 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
   8783 	  /* Only allowed if base reg not in list for other types.  */
   8784 	  else if (range & (1 << base_reg))
   8785 	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
   8786 	}
   8787       else /* STM.  */
   8788 	{
   8789 	  /* Not allowed for type 2.  */
   8790 	  if (inst.instruction & LDM_TYPE_2_OR_3)
   8791 	    as_warn (_("writeback of base register is UNPREDICTABLE"));
   8792 	  /* Only allowed if base reg not in list, or first in list.  */
   8793 	  else if ((range & (1 << base_reg))
   8794 		   && (range & ((1 << base_reg) - 1)))
   8795 	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
   8796 	}
   8797     }
   8798 
   8799   /* If PUSH/POP has only one register, then use the A2 encoding.  */
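           /* For example (illustrative): "push {r3}" is re-encoded as the
              single-register form "str r3, [sp, #-4]!", and "pop {r3}" as
              "ldr r3, [sp], #4".  */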
   8800   one_reg = only_one_reg_in_list (range);
   8801   if (from_push_pop_mnem && one_reg >= 0)
   8802     {
   8803       int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
   8804 
   8805       inst.instruction &= A_COND_MASK;
   8806       inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
   8807       inst.instruction |= one_reg << 12;
   8808     }
   8809 }
   8810 
   8811 static void
   8812 do_ldmstm (void)
   8813 {
   8814   encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
   8815 }
   8816 
   8817 /* ARMv5TE load-consecutive (argument parse)
   8818    Mode is like LDRH.
   8819 
   8820      LDRccD R, mode
   8821      STRccD R, mode.  */
   8822 
   8823 static void
   8824 do_ldrd (void)
   8825 {
   8826   constraint (inst.operands[0].reg % 2 != 0,
   8827 	      _("first transfer register must be even"));
   8828   constraint (inst.operands[1].present
   8829 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
   8830 	      _("can only transfer two consecutive registers"));
   8831   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
   8832   constraint (!inst.operands[2].isreg, _("'[' expected"));
   8833 
   8834   if (!inst.operands[1].present)
   8835     inst.operands[1].reg = inst.operands[0].reg + 1;
   8836 
   8837   /* encode_arm_addr_mode_3 will diagnose overlap between the base
   8838      register and the first register written; we have to diagnose
   8839      overlap between the base and the second register written here.  */
   8840 
   8841   if (inst.operands[2].reg == inst.operands[1].reg
   8842       && (inst.operands[2].writeback || inst.operands[2].postind))
   8843     as_warn (_("base register written back, and overlaps "
   8844 	       "second transfer register"));
   8845 
   8846   if (!(inst.instruction & V4_STR_BIT))
   8847     {
   8848       /* For an index-register load, the index register must not overlap the
   8849 	destination (even if not write-back).  */
   8850       if (inst.operands[2].immisreg
   8851 	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
   8852 	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
   8853 	as_warn (_("index register overlaps transfer register"));
   8854     }
   8855   inst.instruction |= inst.operands[0].reg << 12;
   8856   encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
   8857 }
   8858 
   8859 static void
   8860 do_ldrex (void)
   8861 {
   8862   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
   8863 	      || inst.operands[1].postind || inst.operands[1].writeback
   8864 	      || inst.operands[1].immisreg || inst.operands[1].shifted
   8865 	      || inst.operands[1].negative
   8866 	      /* This can arise if the programmer has written
   8867 		   strex rN, rM, foo
   8868 		 or if they have mistakenly used a register name as the last
   8869 		 operand,  eg:
   8870 		   strex rN, rM, rX
   8871 		 It is very difficult to distinguish between these two cases
   8872 		 because "rX" might actually be a label. ie the register
   8873 		 name has been occluded by a symbol of the same name. So we
   8874 		 just generate a general 'bad addressing mode' type error
   8875 		 message and leave it up to the programmer to discover the
   8876 		 true cause and fix their mistake.  */
   8877 	      || (inst.operands[1].reg == REG_PC),
   8878 	      BAD_ADDR_MODE);
   8879 
   8880   constraint (inst.reloc.exp.X_op != O_constant
   8881 	      || inst.reloc.exp.X_add_number != 0,
   8882 	      _("offset must be zero in ARM encoding"));
   8883 
   8884   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
   8885 
   8886   inst.instruction |= inst.operands[0].reg << 12;
   8887   inst.instruction |= inst.operands[1].reg << 16;
   8888   inst.reloc.type = BFD_RELOC_UNUSED;
   8889 }
   8890 
   8891 static void
   8892 do_ldrexd (void)
   8893 {
   8894   constraint (inst.operands[0].reg % 2 != 0,
   8895 	      _("even register required"));
   8896   constraint (inst.operands[1].present
   8897 	      && inst.operands[1].reg != inst.operands[0].reg + 1,
   8898 	      _("can only load two consecutive registers"));
   8899   /* If op 1 were present and equal to PC, this function wouldn't
   8900      have been called in the first place.  */
   8901   constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
   8902 
   8903   inst.instruction |= inst.operands[0].reg << 12;
   8904   inst.instruction |= inst.operands[2].reg << 16;
   8905 }
   8906 
   8907 /* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
   8908    which is not a multiple of four is UNPREDICTABLE.  */
   8909 static void
   8910 check_ldr_r15_aligned (void)
   8911 {
   8912   constraint (!(inst.operands[1].immisreg)
   8913 	      && (inst.operands[0].reg == REG_PC
   8914 	      && inst.operands[1].reg == REG_PC
   8915 	      && (inst.reloc.exp.X_add_number & 0x3)),
    8916 	      _("ldr to register 15 must be 4-byte aligned"));
   8917 }
   8918 
   8919 static void
   8920 do_ldst (void)
   8921 {
   8922   inst.instruction |= inst.operands[0].reg << 12;
   8923   if (!inst.operands[1].isreg)
   8924     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
   8925       return;
   8926   encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
   8927   check_ldr_r15_aligned ();
   8928 }
   8929 
   8930 static void
   8931 do_ldstt (void)
   8932 {
   8933   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
   8934      reject [Rn,...].  */
   8935   if (inst.operands[1].preind)
   8936     {
   8937       constraint (inst.reloc.exp.X_op != O_constant
   8938 		  || inst.reloc.exp.X_add_number != 0,
   8939 		  _("this instruction requires a post-indexed address"));
   8940 
   8941       inst.operands[1].preind = 0;
   8942       inst.operands[1].postind = 1;
   8943       inst.operands[1].writeback = 1;
   8944     }
   8945   inst.instruction |= inst.operands[0].reg << 12;
   8946   encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
   8947 }
   8948 
   8949 /* Halfword and signed-byte load/store operations.  */
   8950 
   8951 static void
   8952 do_ldstv4 (void)
   8953 {
   8954   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   8955   inst.instruction |= inst.operands[0].reg << 12;
   8956   if (!inst.operands[1].isreg)
   8957     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
   8958       return;
   8959   encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
   8960 }
   8961 
   8962 static void
   8963 do_ldsttv4 (void)
   8964 {
   8965   /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
   8966      reject [Rn,...].  */
   8967   if (inst.operands[1].preind)
   8968     {
   8969       constraint (inst.reloc.exp.X_op != O_constant
   8970 		  || inst.reloc.exp.X_add_number != 0,
   8971 		  _("this instruction requires a post-indexed address"));
   8972 
   8973       inst.operands[1].preind = 0;
   8974       inst.operands[1].postind = 1;
   8975       inst.operands[1].writeback = 1;
   8976     }
   8977   inst.instruction |= inst.operands[0].reg << 12;
   8978   encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
   8979 }
   8980 
   8981 /* Co-processor register load/store.
   8982    Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
   8983 static void
   8984 do_lstc (void)
   8985 {
   8986   inst.instruction |= inst.operands[0].reg << 8;
   8987   inst.instruction |= inst.operands[1].reg << 12;
   8988   encode_arm_cp_address (2, TRUE, TRUE, 0);
   8989 }
   8990 
   8991 static void
   8992 do_mlas (void)
   8993 {
   8994   /* This restriction does not apply to mls (nor to mla in v6 or later).  */
   8995   if (inst.operands[0].reg == inst.operands[1].reg
   8996       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
   8997       && !(inst.instruction & 0x00400000))
   8998     as_tsktsk (_("Rd and Rm should be different in mla"));
   8999 
   9000   inst.instruction |= inst.operands[0].reg << 16;
   9001   inst.instruction |= inst.operands[1].reg;
   9002   inst.instruction |= inst.operands[2].reg << 8;
   9003   inst.instruction |= inst.operands[3].reg << 12;
   9004 }
   9005 
   9006 static void
   9007 do_mov (void)
   9008 {
   9009   constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
    9010 	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
   9011 	      THUMB1_RELOC_ONLY);
   9012   inst.instruction |= inst.operands[0].reg << 12;
   9013   encode_arm_shifter_operand (1);
   9014 }
   9015 
   9016 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
   9017 static void
   9018 do_mov16 (void)
   9019 {
   9020   bfd_vma imm;
   9021   bfd_boolean top;
   9022 
   9023   top = (inst.instruction & 0x00400000) != 0;
   9024   constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
    9025 	      _(":lower16: not allowed in this instruction"));
   9026   constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
    9027 	      _(":upper16: not allowed in this instruction"));
   9028   inst.instruction |= inst.operands[0].reg << 12;
   9029   if (inst.reloc.type == BFD_RELOC_UNUSED)
   9030     {
   9031       imm = inst.reloc.exp.X_add_number;
   9032       /* The value is in two pieces: 0:11, 16:19.  */
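               /* Illustrative example: "movw r0, #0x1234" stores 0x234 in
                  bits 11:0 and 0x1 in bits 19:16.  */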
   9033       inst.instruction |= (imm & 0x00000fff);
   9034       inst.instruction |= (imm & 0x0000f000) << 4;
   9035     }
   9036 }
   9037 
   9038 static int
   9039 do_vfp_nsyn_mrs (void)
   9040 {
   9041   if (inst.operands[0].isvec)
   9042     {
   9043       if (inst.operands[1].reg != 1)
   9044 	first_error (_("operand 1 must be FPSCR"));
   9045       memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
   9046       memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
   9047       do_vfp_nsyn_opcode ("fmstat");
   9048     }
   9049   else if (inst.operands[1].isvec)
   9050     do_vfp_nsyn_opcode ("fmrx");
   9051   else
   9052     return FAIL;
   9053 
   9054   return SUCCESS;
   9055 }
   9056 
   9057 static int
   9058 do_vfp_nsyn_msr (void)
   9059 {
   9060   if (inst.operands[0].isvec)
   9061     do_vfp_nsyn_opcode ("fmxr");
   9062   else
   9063     return FAIL;
   9064 
   9065   return SUCCESS;
   9066 }
   9067 
   9068 static void
   9069 do_vmrs (void)
   9070 {
   9071   unsigned Rt = inst.operands[0].reg;
   9072 
   9073   if (thumb_mode && Rt == REG_SP)
   9074     {
   9075       inst.error = BAD_SP;
   9076       return;
   9077     }
   9078 
   9079   /* APSR_ sets isvec. All other refs to PC are illegal.  */
   9080   if (!inst.operands[0].isvec && Rt == REG_PC)
   9081     {
   9082       inst.error = BAD_PC;
   9083       return;
   9084     }
   9085 
   9086   /* If we get through parsing the register name, we just insert the number
   9087      generated into the instruction without further validation.  */
   9088   inst.instruction |= (inst.operands[1].reg << 16);
   9089   inst.instruction |= (Rt << 12);
   9090 }
   9091 
   9092 static void
   9093 do_vmsr (void)
   9094 {
   9095   unsigned Rt = inst.operands[1].reg;
   9096 
   9097   if (thumb_mode)
   9098     reject_bad_reg (Rt);
   9099   else if (Rt == REG_PC)
   9100     {
   9101       inst.error = BAD_PC;
   9102       return;
   9103     }
   9104 
   9105   /* If we get through parsing the register name, we just insert the number
   9106      generated into the instruction without further validation.  */
   9107   inst.instruction |= (inst.operands[0].reg << 16);
   9108   inst.instruction |= (Rt << 12);
   9109 }
   9110 
   9111 static void
   9112 do_mrs (void)
   9113 {
   9114   unsigned br;
   9115 
   9116   if (do_vfp_nsyn_mrs () == SUCCESS)
   9117     return;
   9118 
   9119   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   9120   inst.instruction |= inst.operands[0].reg << 12;
   9121 
   9122   if (inst.operands[1].isreg)
   9123     {
   9124       br = inst.operands[1].reg;
   9125       if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
   9126 	as_bad (_("bad register for mrs"));
   9127     }
   9128   else
   9129     {
   9130       /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
   9131       constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
   9132 		  != (PSR_c|PSR_f),
   9133 		  _("'APSR', 'CPSR' or 'SPSR' expected"));
   9134       br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
   9135     }
   9136 
   9137   inst.instruction |= br;
   9138 }
   9139 
   9140 /* Two possible forms:
   9141       "{C|S}PSR_<field>, Rm",
   9142       "{C|S}PSR_f, #expression".  */
   9143 
   9144 static void
   9145 do_msr (void)
   9146 {
   9147   if (do_vfp_nsyn_msr () == SUCCESS)
   9148     return;
   9149 
   9150   inst.instruction |= inst.operands[0].imm;
   9151   if (inst.operands[1].isreg)
   9152     inst.instruction |= inst.operands[1].reg;
   9153   else
   9154     {
   9155       inst.instruction |= INST_IMMEDIATE;
   9156       inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
   9157       inst.reloc.pc_rel = 0;
   9158     }
   9159 }
   9160 
   9161 static void
   9162 do_mul (void)
   9163 {
   9164   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
   9165 
   9166   if (!inst.operands[2].present)
   9167     inst.operands[2].reg = inst.operands[0].reg;
   9168   inst.instruction |= inst.operands[0].reg << 16;
   9169   inst.instruction |= inst.operands[1].reg;
   9170   inst.instruction |= inst.operands[2].reg << 8;
   9171 
   9172   if (inst.operands[0].reg == inst.operands[1].reg
   9173       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
   9174     as_tsktsk (_("Rd and Rm should be different in mul"));
   9175 }
   9176 
   9177 /* Long Multiply Parser
   9178    UMULL RdLo, RdHi, Rm, Rs
   9179    SMULL RdLo, RdHi, Rm, Rs
   9180    UMLAL RdLo, RdHi, Rm, Rs
   9181    SMLAL RdLo, RdHi, Rm, Rs.  */
   9182 
   9183 static void
   9184 do_mull (void)
   9185 {
   9186   inst.instruction |= inst.operands[0].reg << 12;
   9187   inst.instruction |= inst.operands[1].reg << 16;
   9188   inst.instruction |= inst.operands[2].reg;
   9189   inst.instruction |= inst.operands[3].reg << 8;
   9190 
   9191   /* rdhi and rdlo must be different.  */
   9192   if (inst.operands[0].reg == inst.operands[1].reg)
   9193     as_tsktsk (_("rdhi and rdlo must be different"));
   9194 
   9195   /* rdhi, rdlo and rm must all be different before armv6.  */
   9196   if ((inst.operands[0].reg == inst.operands[2].reg
   9197       || inst.operands[1].reg == inst.operands[2].reg)
   9198       && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
   9199     as_tsktsk (_("rdhi, rdlo and rm must all be different"));
   9200 }
   9201 
   9202 static void
   9203 do_nop (void)
   9204 {
   9205   if (inst.operands[0].present
   9206       || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
   9207     {
   9208       /* Architectural NOP hints are CPSR sets with no bits selected.  */
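               /* Illustrative example: an unconditional "nop" assembles to
                  0xe320f000; a nonzero hint operand is ORed into the low
                  bits (1 = YIELD, 2 = WFE, 3 = WFI, 4 = SEV).  */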
   9209       inst.instruction &= 0xf0000000;
   9210       inst.instruction |= 0x0320f000;
   9211       if (inst.operands[0].present)
   9212 	inst.instruction |= inst.operands[0].imm;
   9213     }
   9214 }
   9215 
   9216 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   9217    PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   9218    Condition defaults to COND_ALWAYS.
   9219    Error if Rd, Rn or Rm are R15.  */
   9220 
   9221 static void
   9222 do_pkhbt (void)
   9223 {
   9224   inst.instruction |= inst.operands[0].reg << 12;
   9225   inst.instruction |= inst.operands[1].reg << 16;
   9226   inst.instruction |= inst.operands[2].reg;
   9227   if (inst.operands[3].present)
   9228     encode_arm_shift (3);
   9229 }
   9230 
   9231 /* ARM V6 PKHTB (Argument Parse).  */
   9232 
   9233 static void
   9234 do_pkhtb (void)
   9235 {
   9236   if (!inst.operands[3].present)
   9237     {
   9238       /* If the shift specifier is omitted, turn the instruction
   9239 	 into pkhbt rd, rm, rn. */
   9240       inst.instruction &= 0xfff00010;
   9241       inst.instruction |= inst.operands[0].reg << 12;
   9242       inst.instruction |= inst.operands[1].reg;
   9243       inst.instruction |= inst.operands[2].reg << 16;
   9244     }
   9245   else
   9246     {
   9247       inst.instruction |= inst.operands[0].reg << 12;
   9248       inst.instruction |= inst.operands[1].reg << 16;
   9249       inst.instruction |= inst.operands[2].reg;
   9250       encode_arm_shift (3);
   9251     }
   9252 }
   9253 
   9254 /* ARMv5TE: Preload-Cache
   9255    MP Extensions: Preload for write
   9256 
   9257     PLD(W) <addr_mode>
   9258 
   9259   Syntactically, like LDR with B=1, W=0, L=1.  */
   9260 
   9261 static void
   9262 do_pld (void)
   9263 {
   9264   constraint (!inst.operands[0].isreg,
   9265 	      _("'[' expected after PLD mnemonic"));
   9266   constraint (inst.operands[0].postind,
   9267 	      _("post-indexed expression used in preload instruction"));
   9268   constraint (inst.operands[0].writeback,
   9269 	      _("writeback used in preload instruction"));
   9270   constraint (!inst.operands[0].preind,
   9271 	      _("unindexed addressing used in preload instruction"));
   9272   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
   9273 }
   9274 
   9275 /* ARMv7: PLI <addr_mode>  */
   9276 static void
   9277 do_pli (void)
   9278 {
   9279   constraint (!inst.operands[0].isreg,
   9280 	      _("'[' expected after PLI mnemonic"));
   9281   constraint (inst.operands[0].postind,
   9282 	      _("post-indexed expression used in preload instruction"));
   9283   constraint (inst.operands[0].writeback,
   9284 	      _("writeback used in preload instruction"));
   9285   constraint (!inst.operands[0].preind,
   9286 	      _("unindexed addressing used in preload instruction"));
   9287   encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
   9288   inst.instruction &= ~PRE_INDEX;
   9289 }
   9290 
   9291 static void
   9292 do_push_pop (void)
   9293 {
   9294   constraint (inst.operands[0].writeback,
   9295 	      _("push/pop do not support {reglist}^"));
   9296   inst.operands[1] = inst.operands[0];
   9297   memset (&inst.operands[0], 0, sizeof inst.operands[0]);
   9298   inst.operands[0].isreg = 1;
   9299   inst.operands[0].writeback = 1;
   9300   inst.operands[0].reg = REG_SP;
   9301   encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
   9302 }
   9303 
   9304 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   9305    word at the specified address and the following word
   9306    respectively.
   9307    Unconditionally executed.
   9308    Error if Rn is R15.	*/
   9309 
   9310 static void
   9311 do_rfe (void)
   9312 {
   9313   inst.instruction |= inst.operands[0].reg << 16;
   9314   if (inst.operands[0].writeback)
   9315     inst.instruction |= WRITE_BACK;
   9316 }
   9317 
   9318 /* ARM V6 ssat (argument parse).  */
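         /* Illustrative example: "ssat r0, #16, r1" stores 15 (the saturation
            width minus one) in bits 20:16; do_usat below stores the width
            unmodified.  */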
   9319 
   9320 static void
   9321 do_ssat (void)
   9322 {
   9323   inst.instruction |= inst.operands[0].reg << 12;
   9324   inst.instruction |= (inst.operands[1].imm - 1) << 16;
   9325   inst.instruction |= inst.operands[2].reg;
   9326 
   9327   if (inst.operands[3].present)
   9328     encode_arm_shift (3);
   9329 }
   9330 
   9331 /* ARM V6 usat (argument parse).  */
   9332 
   9333 static void
   9334 do_usat (void)
   9335 {
   9336   inst.instruction |= inst.operands[0].reg << 12;
   9337   inst.instruction |= inst.operands[1].imm << 16;
   9338   inst.instruction |= inst.operands[2].reg;
   9339 
   9340   if (inst.operands[3].present)
   9341     encode_arm_shift (3);
   9342 }
   9343 
   9344 /* ARM V6 ssat16 (argument parse).  */
   9345 
   9346 static void
   9347 do_ssat16 (void)
   9348 {
   9349   inst.instruction |= inst.operands[0].reg << 12;
   9350   inst.instruction |= ((inst.operands[1].imm - 1) << 16);
   9351   inst.instruction |= inst.operands[2].reg;
   9352 }
   9353 
   9354 static void
   9355 do_usat16 (void)
   9356 {
   9357   inst.instruction |= inst.operands[0].reg << 12;
   9358   inst.instruction |= inst.operands[1].imm << 16;
   9359   inst.instruction |= inst.operands[2].reg;
   9360 }
   9361 
   9362 /* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   9363    preserving the other bits.
   9364 
   9365    setend <endian_specifier>, where <endian_specifier> is either
   9366    BE or LE.  */
   9367 
   9368 static void
   9369 do_setend (void)
   9370 {
   9371   if (warn_on_deprecated
   9372       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    9373     as_tsktsk (_("setend use is deprecated for ARMv8"));
   9374 
   9375   if (inst.operands[0].imm)
   9376     inst.instruction |= 0x200;
   9377 }
   9378 
   9379 static void
   9380 do_shift (void)
   9381 {
   9382   unsigned int Rm = (inst.operands[1].present
   9383 		     ? inst.operands[1].reg
   9384 		     : inst.operands[0].reg);
   9385 
   9386   inst.instruction |= inst.operands[0].reg << 12;
   9387   inst.instruction |= Rm;
   9388   if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
   9389     {
   9390       inst.instruction |= inst.operands[2].reg << 8;
   9391       inst.instruction |= SHIFT_BY_REG;
   9392       /* PR 12854: Error on extraneous shifts.  */
   9393       constraint (inst.operands[2].shifted,
   9394 		  _("extraneous shift as part of operand to shift insn"));
   9395     }
   9396   else
   9397     inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
   9398 }
   9399 
   9400 static void
   9401 do_smc (void)
   9402 {
   9403   inst.reloc.type = BFD_RELOC_ARM_SMC;
   9404   inst.reloc.pc_rel = 0;
   9405 }
   9406 
   9407 static void
   9408 do_hvc (void)
   9409 {
   9410   inst.reloc.type = BFD_RELOC_ARM_HVC;
   9411   inst.reloc.pc_rel = 0;
   9412 }
   9413 
   9414 static void
   9415 do_swi (void)
   9416 {
   9417   inst.reloc.type = BFD_RELOC_ARM_SWI;
   9418   inst.reloc.pc_rel = 0;
   9419 }
   9420 
   9421 static void
   9422 do_setpan (void)
   9423 {
   9424   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
   9425 	      _("selected processor does not support SETPAN instruction"));
   9426 
   9427   inst.instruction |= ((inst.operands[0].imm & 1) << 9);
   9428 }
   9429 
   9430 static void
   9431 do_t_setpan (void)
   9432 {
   9433   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
   9434 	      _("selected processor does not support SETPAN instruction"));
   9435 
   9436   inst.instruction |= (inst.operands[0].imm << 3);
   9437 }
   9438 
   9439 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   9440    SMLAxy{cond} Rd,Rm,Rs,Rn
   9441    SMLAWy{cond} Rd,Rm,Rs,Rn
   9442    Error if any register is R15.  */
   9443 
   9444 static void
   9445 do_smla (void)
   9446 {
   9447   inst.instruction |= inst.operands[0].reg << 16;
   9448   inst.instruction |= inst.operands[1].reg;
   9449   inst.instruction |= inst.operands[2].reg << 8;
   9450   inst.instruction |= inst.operands[3].reg << 12;
   9451 }
   9452 
   9453 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   9454    SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   9455    Error if any register is R15.
   9456    Warning if Rdlo == Rdhi.  */
   9457 
   9458 static void
   9459 do_smlal (void)
   9460 {
   9461   inst.instruction |= inst.operands[0].reg << 12;
   9462   inst.instruction |= inst.operands[1].reg << 16;
   9463   inst.instruction |= inst.operands[2].reg;
   9464   inst.instruction |= inst.operands[3].reg << 8;
   9465 
   9466   if (inst.operands[0].reg == inst.operands[1].reg)
   9467     as_tsktsk (_("rdhi and rdlo must be different"));
   9468 }
   9469 
   9470 /* ARM V5E (El Segundo) signed-multiply (argument parse)
   9471    SMULxy{cond} Rd,Rm,Rs
   9472    Error if any register is R15.  */
   9473 
   9474 static void
   9475 do_smul (void)
   9476 {
   9477   inst.instruction |= inst.operands[0].reg << 16;
   9478   inst.instruction |= inst.operands[1].reg;
   9479   inst.instruction |= inst.operands[2].reg << 8;
   9480 }
   9481 
   9482 /* ARM V6 srs (argument parse).  The variable fields in the encoding are
   9483    the same for both ARM and Thumb-2.  */
   9484 
   9485 static void
   9486 do_srs (void)
   9487 {
   9488   int reg;
   9489 
   9490   if (inst.operands[0].present)
   9491     {
   9492       reg = inst.operands[0].reg;
   9493       constraint (reg != REG_SP, _("SRS base register must be r13"));
   9494     }
   9495   else
   9496     reg = REG_SP;
   9497 
   9498   inst.instruction |= reg << 16;
   9499   inst.instruction |= inst.operands[1].imm;
   9500   if (inst.operands[0].writeback || inst.operands[1].writeback)
   9501     inst.instruction |= WRITE_BACK;
   9502 }
   9503 
   9504 /* ARM V6 strex (argument parse).  */
   9505 
   9506 static void
   9507 do_strex (void)
   9508 {
   9509   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   9510 	      || inst.operands[2].postind || inst.operands[2].writeback
   9511 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   9512 	      || inst.operands[2].negative
   9513 	      /* See comment in do_ldrex().  */
   9514 	      || (inst.operands[2].reg == REG_PC),
   9515 	      BAD_ADDR_MODE);
   9516 
   9517   constraint (inst.operands[0].reg == inst.operands[1].reg
   9518 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9519 
   9520   constraint (inst.reloc.exp.X_op != O_constant
   9521 	      || inst.reloc.exp.X_add_number != 0,
   9522 	      _("offset must be zero in ARM encoding"));
   9523 
   9524   inst.instruction |= inst.operands[0].reg << 12;
   9525   inst.instruction |= inst.operands[1].reg;
   9526   inst.instruction |= inst.operands[2].reg << 16;
   9527   inst.reloc.type = BFD_RELOC_UNUSED;
   9528 }
   9529 
   9530 static void
   9531 do_t_strexbh (void)
   9532 {
   9533   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   9534 	      || inst.operands[2].postind || inst.operands[2].writeback
   9535 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   9536 	      || inst.operands[2].negative,
   9537 	      BAD_ADDR_MODE);
   9538 
   9539   constraint (inst.operands[0].reg == inst.operands[1].reg
   9540 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9541 
   9542   do_rm_rd_rn ();
   9543 }
   9544 
   9545 static void
   9546 do_strexd (void)
   9547 {
   9548   constraint (inst.operands[1].reg % 2 != 0,
   9549 	      _("even register required"));
   9550   constraint (inst.operands[2].present
   9551 	      && inst.operands[2].reg != inst.operands[1].reg + 1,
   9552 	      _("can only store two consecutive registers"));
   9553   /* If op 2 were present and equal to PC, this function wouldn't
   9554      have been called in the first place.  */
   9555   constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
   9556 
   9557   constraint (inst.operands[0].reg == inst.operands[1].reg
   9558 	      || inst.operands[0].reg == inst.operands[1].reg + 1
   9559 	      || inst.operands[0].reg == inst.operands[3].reg,
   9560 	      BAD_OVERLAP);
   9561 
   9562   inst.instruction |= inst.operands[0].reg << 12;
   9563   inst.instruction |= inst.operands[1].reg;
   9564   inst.instruction |= inst.operands[3].reg << 16;
   9565 }
   9566 
   9567 /* ARM V8 STRL.  */
   9568 static void
   9569 do_stlex (void)
   9570 {
   9571   constraint (inst.operands[0].reg == inst.operands[1].reg
   9572 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9573 
   9574   do_rd_rm_rn ();
   9575 }
   9576 
   9577 static void
   9578 do_t_stlex (void)
   9579 {
   9580   constraint (inst.operands[0].reg == inst.operands[1].reg
   9581 	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
   9582 
   9583   do_rm_rd_rn ();
   9584 }
   9585 
   9586 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   9587    extends it to 32-bits, and adds the result to a value in another
   9588    register.  You can specify a rotation by 0, 8, 16, or 24 bits
   9589    before extracting the 16-bit value.
   9590    SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   9591    Condition defaults to COND_ALWAYS.
    9592    Error if any register is R15.  */
   9593 
   9594 static void
   9595 do_sxtah (void)
   9596 {
   9597   inst.instruction |= inst.operands[0].reg << 12;
   9598   inst.instruction |= inst.operands[1].reg << 16;
   9599   inst.instruction |= inst.operands[2].reg;
   9600   inst.instruction |= inst.operands[3].imm << 10;
   9601 }
   9602 
   9603 /* ARM V6 SXTH.
   9604 
   9605    SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   9606    Condition defaults to COND_ALWAYS.
    9607    Error if any register is R15.  */
   9608 
   9609 static void
   9610 do_sxth (void)
   9611 {
   9612   inst.instruction |= inst.operands[0].reg << 12;
   9613   inst.instruction |= inst.operands[1].reg;
   9614   inst.instruction |= inst.operands[2].imm << 10;
   9615 }
   9616 
   9617 /* VFP instructions.  In a logical order: SP variant first, monad
   9619    before dyad, arithmetic then move then load/store.  */
   9620 
   9621 static void
   9622 do_vfp_sp_monadic (void)
   9623 {
   9624   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9625   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
   9626 }
   9627 
   9628 static void
   9629 do_vfp_sp_dyadic (void)
   9630 {
   9631   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9632   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
   9633   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
   9634 }
   9635 
   9636 static void
   9637 do_vfp_sp_compare_z (void)
   9638 {
   9639   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9640 }
   9641 
   9642 static void
   9643 do_vfp_dp_sp_cvt (void)
   9644 {
   9645   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9646   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
   9647 }
   9648 
   9649 static void
   9650 do_vfp_sp_dp_cvt (void)
   9651 {
   9652   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9653   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
   9654 }
   9655 
   9656 static void
   9657 do_vfp_reg_from_sp (void)
   9658 {
   9659   inst.instruction |= inst.operands[0].reg << 12;
   9660   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
   9661 }
   9662 
   9663 static void
   9664 do_vfp_reg2_from_sp2 (void)
   9665 {
   9666   constraint (inst.operands[2].imm != 2,
   9667 	      _("only two consecutive VFP SP registers allowed here"));
   9668   inst.instruction |= inst.operands[0].reg << 12;
   9669   inst.instruction |= inst.operands[1].reg << 16;
   9670   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
   9671 }
   9672 
   9673 static void
   9674 do_vfp_sp_from_reg (void)
   9675 {
   9676   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
   9677   inst.instruction |= inst.operands[1].reg << 12;
   9678 }
   9679 
   9680 static void
   9681 do_vfp_sp2_from_reg2 (void)
   9682 {
   9683   constraint (inst.operands[0].imm != 2,
   9684 	      _("only two consecutive VFP SP registers allowed here"));
   9685   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
   9686   inst.instruction |= inst.operands[1].reg << 12;
   9687   inst.instruction |= inst.operands[2].reg << 16;
   9688 }
   9689 
   9690 static void
   9691 do_vfp_sp_ldst (void)
   9692 {
   9693   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9694   encode_arm_cp_address (1, FALSE, TRUE, 0);
   9695 }
   9696 
   9697 static void
   9698 do_vfp_dp_ldst (void)
   9699 {
   9700   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9701   encode_arm_cp_address (1, FALSE, TRUE, 0);
   9702 }
   9703 
   9704 
   9705 static void
   9706 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
   9707 {
   9708   if (inst.operands[0].writeback)
   9709     inst.instruction |= WRITE_BACK;
   9710   else
   9711     constraint (ldstm_type != VFP_LDSTMIA,
   9712 		_("this addressing mode requires base-register writeback"));
   9713   inst.instruction |= inst.operands[0].reg << 16;
   9714   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
   9715   inst.instruction |= inst.operands[1].imm;
   9716 }
   9717 
   9718 static void
   9719 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
   9720 {
   9721   int count;
   9722 
   9723   if (inst.operands[0].writeback)
   9724     inst.instruction |= WRITE_BACK;
   9725   else
   9726     constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
   9727 		_("this addressing mode requires base-register writeback"));
   9728 
   9729   inst.instruction |= inst.operands[0].reg << 16;
   9730   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9731 
   9732   count = inst.operands[1].imm << 1;
   9733   if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
   9734     count += 1;
   9735 
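           /* Illustrative example: an FLDMIAX of three D registers transfers
              3 * 2 + 1 = 7 words.  */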
   9736   inst.instruction |= count;
   9737 }
   9738 
   9739 static void
   9740 do_vfp_sp_ldstmia (void)
   9741 {
   9742   vfp_sp_ldstm (VFP_LDSTMIA);
   9743 }
   9744 
   9745 static void
   9746 do_vfp_sp_ldstmdb (void)
   9747 {
   9748   vfp_sp_ldstm (VFP_LDSTMDB);
   9749 }
   9750 
   9751 static void
   9752 do_vfp_dp_ldstmia (void)
   9753 {
   9754   vfp_dp_ldstm (VFP_LDSTMIA);
   9755 }
   9756 
   9757 static void
   9758 do_vfp_dp_ldstmdb (void)
   9759 {
   9760   vfp_dp_ldstm (VFP_LDSTMDB);
   9761 }
   9762 
   9763 static void
   9764 do_vfp_xp_ldstmia (void)
   9765 {
   9766   vfp_dp_ldstm (VFP_LDSTMIAX);
   9767 }
   9768 
   9769 static void
   9770 do_vfp_xp_ldstmdb (void)
   9771 {
   9772   vfp_dp_ldstm (VFP_LDSTMDBX);
   9773 }
   9774 
   9775 static void
   9776 do_vfp_dp_rd_rm (void)
   9777 {
   9778   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9779   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
   9780 }
   9781 
   9782 static void
   9783 do_vfp_dp_rn_rd (void)
   9784 {
   9785   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
   9786   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9787 }
   9788 
   9789 static void
   9790 do_vfp_dp_rd_rn (void)
   9791 {
   9792   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9793   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
   9794 }
   9795 
   9796 static void
   9797 do_vfp_dp_rd_rn_rm (void)
   9798 {
   9799   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9800   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
   9801   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
   9802 }
   9803 
   9804 static void
   9805 do_vfp_dp_rd (void)
   9806 {
   9807   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9808 }
   9809 
   9810 static void
   9811 do_vfp_dp_rm_rd_rn (void)
   9812 {
   9813   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
   9814   encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
   9815   encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
   9816 }
   9817 
   9818 /* VFPv3 instructions.  */
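         /* Illustrative example: "vmov.f32 s0, #1.0" uses the VFP modified
            immediate 0x70; do_vfp_sp_const below places the high nibble (7)
            in bits 19:16 and the low nibble (0) in bits 3:0.  */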
   9819 static void
   9820 do_vfp_sp_const (void)
   9821 {
   9822   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9823   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
   9824   inst.instruction |= (inst.operands[1].imm & 0x0f);
   9825 }
   9826 
   9827 static void
   9828 do_vfp_dp_const (void)
   9829 {
   9830   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9831   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
   9832   inst.instruction |= (inst.operands[1].imm & 0x0f);
   9833 }
   9834 
   9835 static void
   9836 vfp_conv (int srcsize)
   9837 {
   9838   int immbits = srcsize - inst.operands[1].imm;
   9839 
   9840   if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
   9841     {
   9842       /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
   9843 	 i.e. immbits must be in range 0 - 16.  */
   9844       inst.error = _("immediate value out of range, expected range [0, 16]");
   9845       return;
   9846     }
   9847   else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
   9848     {
   9849       /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
   9850 	 i.e. immbits must be in range 0 - 31.  */
   9851       inst.error = _("immediate value out of range, expected range [1, 32]");
   9852       return;
   9853     }
   9854 
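           /* Illustrative example: a 32-bit conversion with #20 fractional
              bits gives immbits = 12; bit 5 receives immbits & 1 (0) and
              bits 3:0 receive immbits >> 1 (6).  */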
   9855   inst.instruction |= (immbits & 1) << 5;
   9856   inst.instruction |= (immbits >> 1);
   9857 }
   9858 
   9859 static void
   9860 do_vfp_sp_conv_16 (void)
   9861 {
   9862   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9863   vfp_conv (16);
   9864 }
   9865 
   9866 static void
   9867 do_vfp_dp_conv_16 (void)
   9868 {
   9869   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9870   vfp_conv (16);
   9871 }
   9872 
   9873 static void
   9874 do_vfp_sp_conv_32 (void)
   9875 {
   9876   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   9877   vfp_conv (32);
   9878 }
   9879 
   9880 static void
   9881 do_vfp_dp_conv_32 (void)
   9882 {
   9883   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
   9884   vfp_conv (32);
   9885 }
   9886 
   9887 /* FPA instructions.  Also in a logical order.	*/
   9889 
   9890 static void
   9891 do_fpa_cmp (void)
   9892 {
   9893   inst.instruction |= inst.operands[0].reg << 16;
   9894   inst.instruction |= inst.operands[1].reg;
   9895 }
   9896 
   9897 static void
   9898 do_fpa_ldmstm (void)
   9899 {
   9900   inst.instruction |= inst.operands[0].reg << 12;
   9901   switch (inst.operands[1].imm)
   9902     {
   9903     case 1: inst.instruction |= CP_T_X;		 break;
   9904     case 2: inst.instruction |= CP_T_Y;		 break;
   9905     case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
   9906     case 4:					 break;
   9907     default: abort ();
   9908     }
   9909 
   9910   if (inst.instruction & (PRE_INDEX | INDEX_UP))
   9911     {
   9912       /* The instruction specified "ea" or "fd", so we can only accept
   9913 	 [Rn]{!}.  The instruction does not really support stacking or
   9914 	 unstacking, so we have to emulate these by setting appropriate
   9915 	 bits and offsets.  */
   9916       constraint (inst.reloc.exp.X_op != O_constant
   9917 		  || inst.reloc.exp.X_add_number != 0,
   9918 		  _("this instruction does not support indexing"));
   9919 
   9920       if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
   9921 	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
   9922 
   9923       if (!(inst.instruction & INDEX_UP))
   9924 	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
   9925 
   9926       if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
   9927 	{
   9928 	  inst.operands[2].preind = 0;
   9929 	  inst.operands[2].postind = 1;
   9930 	}
   9931     }
   9932 
   9933   encode_arm_cp_address (2, TRUE, TRUE, 0);
   9934 }
   9935 
   9936 /* iWMMXt instructions: strictly in alphabetical order.	 */
   9938 
   9939 static void
   9940 do_iwmmxt_tandorc (void)
   9941 {
   9942   constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
   9943 }
   9944 
   9945 static void
   9946 do_iwmmxt_textrc (void)
   9947 {
   9948   inst.instruction |= inst.operands[0].reg << 12;
   9949   inst.instruction |= inst.operands[1].imm;
   9950 }
   9951 
   9952 static void
   9953 do_iwmmxt_textrm (void)
   9954 {
   9955   inst.instruction |= inst.operands[0].reg << 12;
   9956   inst.instruction |= inst.operands[1].reg << 16;
   9957   inst.instruction |= inst.operands[2].imm;
   9958 }
   9959 
   9960 static void
   9961 do_iwmmxt_tinsr (void)
   9962 {
   9963   inst.instruction |= inst.operands[0].reg << 16;
   9964   inst.instruction |= inst.operands[1].reg << 12;
   9965   inst.instruction |= inst.operands[2].imm;
   9966 }
   9967 
   9968 static void
   9969 do_iwmmxt_tmia (void)
   9970 {
   9971   inst.instruction |= inst.operands[0].reg << 5;
   9972   inst.instruction |= inst.operands[1].reg;
   9973   inst.instruction |= inst.operands[2].reg << 12;
   9974 }
   9975 
   9976 static void
   9977 do_iwmmxt_waligni (void)
   9978 {
   9979   inst.instruction |= inst.operands[0].reg << 12;
   9980   inst.instruction |= inst.operands[1].reg << 16;
   9981   inst.instruction |= inst.operands[2].reg;
   9982   inst.instruction |= inst.operands[3].imm << 20;
   9983 }
   9984 
   9985 static void
   9986 do_iwmmxt_wmerge (void)
   9987 {
   9988   inst.instruction |= inst.operands[0].reg << 12;
   9989   inst.instruction |= inst.operands[1].reg << 16;
   9990   inst.instruction |= inst.operands[2].reg;
   9991   inst.instruction |= inst.operands[3].imm << 21;
   9992 }
   9993 
   9994 static void
   9995 do_iwmmxt_wmov (void)
   9996 {
   9997   /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
   9998   inst.instruction |= inst.operands[0].reg << 12;
   9999   inst.instruction |= inst.operands[1].reg << 16;
   10000   inst.instruction |= inst.operands[1].reg;
   10001 }
   10002 
   10003 static void
   10004 do_iwmmxt_wldstbh (void)
   10005 {
   10006   int reloc;
   10007   inst.instruction |= inst.operands[0].reg << 12;
   10008   if (thumb_mode)
   10009     reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
   10010   else
   10011     reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
   10012   encode_arm_cp_address (1, TRUE, FALSE, reloc);
   10013 }
   10014 
   10015 static void
   10016 do_iwmmxt_wldstw (void)
   10017 {
   10018   /* RIWR_RIWC clears .isreg for a control register.  */
   10019   if (!inst.operands[0].isreg)
   10020     {
   10021       constraint (inst.cond != COND_ALWAYS, BAD_COND);
   10022       inst.instruction |= 0xf0000000;
   10023     }
   10024 
   10025   inst.instruction |= inst.operands[0].reg << 12;
   10026   encode_arm_cp_address (1, TRUE, TRUE, 0);
   10027 }
   10028 
   10029 static void
   10030 do_iwmmxt_wldstd (void)
   10031 {
   10032   inst.instruction |= inst.operands[0].reg << 12;
   10033   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
   10034       && inst.operands[1].immisreg)
   10035     {
   10036       inst.instruction &= ~0x1a000ff;
   10037       inst.instruction |= (0xfU << 28);
   10038       if (inst.operands[1].preind)
   10039 	inst.instruction |= PRE_INDEX;
   10040       if (!inst.operands[1].negative)
   10041 	inst.instruction |= INDEX_UP;
   10042       if (inst.operands[1].writeback)
   10043 	inst.instruction |= WRITE_BACK;
   10044       inst.instruction |= inst.operands[1].reg << 16;
   10045       inst.instruction |= inst.reloc.exp.X_add_number << 4;
   10046       inst.instruction |= inst.operands[1].imm;
   10047     }
   10048   else
   10049     encode_arm_cp_address (1, TRUE, FALSE, 0);
   10050 }
   10051 
   10052 static void
   10053 do_iwmmxt_wshufh (void)
   10054 {
   10055   inst.instruction |= inst.operands[0].reg << 12;
   10056   inst.instruction |= inst.operands[1].reg << 16;
   10057   inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
   10058   inst.instruction |= (inst.operands[2].imm & 0x0f);
   10059 }
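
          /* A worked example of the split above (operands chosen for
             illustration): for WSHUFH wRd, wRn, #0xe4, (0xe4 & 0xf0) << 16
             == 0x00e00000 puts the high nibble 0xe in bits 23:20, while
             (0xe4 & 0x0f) == 0x4 lands in bits 3:0.  */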
   10060 
   10061 static void
   10062 do_iwmmxt_wzero (void)
   10063 {
   10064   /* WZERO reg is an alias for WANDN reg, reg, reg.  */
   10065   inst.instruction |= inst.operands[0].reg;
   10066   inst.instruction |= inst.operands[0].reg << 12;
   10067   inst.instruction |= inst.operands[0].reg << 16;
   10068 }
   10069 
   10070 static void
   10071 do_iwmmxt_wrwrwr_or_imm5 (void)
   10072 {
   10073   if (inst.operands[2].isreg)
   10074     do_rd_rn_rm ();
   10075   else {
   10076     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
   10077 		_("immediate operand requires iWMMXt2"));
   10078     do_rd_rn ();
   10079     if (inst.operands[2].imm == 0)
   10080       {
   10081 	switch ((inst.instruction >> 20) & 0xf)
   10082 	  {
   10083 	  case 4:
   10084 	  case 5:
   10085 	  case 6:
   10086 	  case 7:
   10087 	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
   10088 	    inst.operands[2].imm = 16;
   10089 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
   10090 	    break;
   10091 	  case 8:
   10092 	  case 9:
   10093 	  case 10:
   10094 	  case 11:
   10095 	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
   10096 	    inst.operands[2].imm = 32;
   10097 	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
   10098 	    break;
   10099 	  case 12:
   10100 	  case 13:
   10101 	  case 14:
   10102 	  case 15:
   10103 	    {
   10104 	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
   10105 	      unsigned long wrn;
   10106 	      wrn = (inst.instruction >> 16) & 0xf;
   10107 	      inst.instruction &= 0xff0fff0f;
   10108 	      inst.instruction |= wrn;
   10109 	      /* Bail out here; the instruction is now assembled.  */
   10110 	      return;
   10111 	    }
   10112 	  }
   10113       }
   10114     /* Map 32 -> 0, etc.  */
   10115     inst.operands[2].imm &= 0x1f;
   10116     inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
   10117   }
   10118 }
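
          /* A worked example of the imm5 encoding above (immediate chosen for
             illustration): after masking with 0x1f, an immediate of 20
             (0b10100) splits so that bit 4 lands in opcode bit 8
             ((0x10 << 4) == 0x100) and the low nibble 0x4 lands in bits 3:0,
             alongside the 0xf condition field forced into bits 31:28.  */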
   10119 
   10120 /* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   10122    operations first, then control, shift, and load/store.  */
   10123 
   10124 /* Insns like "foo X,Y,Z".  */
   10125 
   10126 static void
   10127 do_mav_triple (void)
   10128 {
   10129   inst.instruction |= inst.operands[0].reg << 16;
   10130   inst.instruction |= inst.operands[1].reg;
   10131   inst.instruction |= inst.operands[2].reg << 12;
   10132 }
   10133 
   10134 /* Insns like "foo W,X,Y,Z".
   10135     where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
   10136 
   10137 static void
   10138 do_mav_quad (void)
   10139 {
   10140   inst.instruction |= inst.operands[0].reg << 5;
   10141   inst.instruction |= inst.operands[1].reg << 12;
   10142   inst.instruction |= inst.operands[2].reg << 16;
   10143   inst.instruction |= inst.operands[3].reg;
   10144 }
   10145 
   10146 /* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
   10147 static void
   10148 do_mav_dspsc (void)
   10149 {
   10150   inst.instruction |= inst.operands[1].reg << 12;
   10151 }
   10152 
   10153 /* Maverick shift immediate instructions.
   10154    cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   10155    cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
   10156 
   10157 static void
   10158 do_mav_shift (void)
   10159 {
   10160   int imm = inst.operands[2].imm;
   10161 
   10162   inst.instruction |= inst.operands[0].reg << 12;
   10163   inst.instruction |= inst.operands[1].reg << 16;
   10164 
   10165   /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
   10166      Bits 5-7 of the insn should have bits 4-6 of the immediate.
   10167      Bit 4 should be 0.	 */
   10168   imm = (imm & 0xf) | ((imm & 0x70) << 1);
   10169 
   10170   inst.instruction |= imm;
   10171 }
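
          /* A worked example of the packing above (shift amount chosen for
             illustration): for an immediate of 0x35 (0b0110101),
             (0x35 & 0xf) == 0x5 and ((0x35 & 0x70) << 1) == 0x60, so the insn
             receives 0x65 in its low byte with bit 4 left clear.  */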
   10172 
   10173 /* XScale instructions.	 Also sorted arithmetic before move.  */
   10175 
   10176 /* Xscale multiply-accumulate (argument parse)
   10177      MIAcc   acc0,Rm,Rs
   10178      MIAPHcc acc0,Rm,Rs
   10179      MIAxycc acc0,Rm,Rs.  */
   10180 
   10181 static void
   10182 do_xsc_mia (void)
   10183 {
   10184   inst.instruction |= inst.operands[1].reg;
   10185   inst.instruction |= inst.operands[2].reg << 12;
   10186 }
   10187 
   10188 /* Xscale move-accumulator-register (argument parse)
   10189 
   10190      MARcc   acc0,RdLo,RdHi.  */
   10191 
   10192 static void
   10193 do_xsc_mar (void)
   10194 {
   10195   inst.instruction |= inst.operands[1].reg << 12;
   10196   inst.instruction |= inst.operands[2].reg << 16;
   10197 }
   10198 
   10199 /* Xscale move-register-accumulator (argument parse)
   10200 
   10201      MRAcc   RdLo,RdHi,acc0.  */
   10202 
   10203 static void
   10204 do_xsc_mra (void)
   10205 {
   10206   constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
   10207   inst.instruction |= inst.operands[0].reg << 12;
   10208   inst.instruction |= inst.operands[1].reg << 16;
   10209 }
   10210 
   10211 /* Encoding functions relevant only to Thumb.  */
   10213 
   10214 /* inst.operands[i] is a shifted-register operand; encode
   10215    it into inst.instruction in the format used by Thumb32.  */
   10216 
   10217 static void
   10218 encode_thumb32_shifted_operand (int i)
   10219 {
   10220   unsigned int value = inst.reloc.exp.X_add_number;
   10221   unsigned int shift = inst.operands[i].shift_kind;
   10222 
   10223   constraint (inst.operands[i].immisreg,
   10224 	      _("shift by register not allowed in thumb mode"));
   10225   inst.instruction |= inst.operands[i].reg;
   10226   if (shift == SHIFT_RRX)
   10227     inst.instruction |= SHIFT_ROR << 4;
   10228   else
   10229     {
   10230       constraint (inst.reloc.exp.X_op != O_constant,
   10231 		  _("expression too complex"));
   10232 
   10233       constraint (value > 32
   10234 		  || (value == 32 && (shift == SHIFT_LSL
   10235 				      || shift == SHIFT_ROR)),
   10236 		  _("shift expression is too large"));
   10237 
   10238       if (value == 0)
   10239 	shift = SHIFT_LSL;
   10240       else if (value == 32)
   10241 	value = 0;
   10242 
   10243       inst.instruction |= shift << 4;
   10244       inst.instruction |= (value & 0x1c) << 10;
   10245       inst.instruction |= (value & 0x03) << 6;
   10246     }
   10247 }
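
          /* A worked example of the split above (shift amount chosen for
             illustration): for a constant shift of #17, (17 & 0x1c) << 10
             == 0x4000 places imm3 == 0b100 in bits 14:12 and (17 & 0x03) << 6
             == 0x40 places imm2 == 0b01 in bits 7:6.  A shift of #32 (where
             permitted, i.e. LSR or ASR) ends up with both fields zero via the
             value == 32 case above.  */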
   10248 
   10249 
   10250 /* inst.operands[i] was set up by parse_address.  Encode it into a
   10251    Thumb32 format load or store instruction.  Reject forms that cannot
   10252    be used with such instructions.  If is_t is true, reject forms that
   10253    cannot be used with a T instruction; if is_d is true, reject forms
   10254    that cannot be used with a D instruction.  If it is a store insn,
   10255    reject PC in Rn.  */
   10256 
   10257 static void
   10258 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
   10259 {
   10260   const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
   10261 
   10262   constraint (!inst.operands[i].isreg,
   10263 	      _("Instruction does not support =N addresses"));
   10264 
   10265   inst.instruction |= inst.operands[i].reg << 16;
   10266   if (inst.operands[i].immisreg)
   10267     {
   10268       constraint (is_pc, BAD_PC_ADDRESSING);
   10269       constraint (is_t || is_d, _("cannot use register index with this instruction"));
   10270       constraint (inst.operands[i].negative,
   10271 		  _("Thumb does not support negative register indexing"));
   10272       constraint (inst.operands[i].postind,
   10273 		  _("Thumb does not support register post-indexing"));
   10274       constraint (inst.operands[i].writeback,
   10275 		  _("Thumb does not support register indexing with writeback"));
   10276       constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
   10277 		  _("Thumb supports only LSL in shifted register indexing"));
   10278 
   10279       inst.instruction |= inst.operands[i].imm;
   10280       if (inst.operands[i].shifted)
   10281 	{
   10282 	  constraint (inst.reloc.exp.X_op != O_constant,
   10283 		      _("expression too complex"));
   10284 	  constraint (inst.reloc.exp.X_add_number < 0
   10285 		      || inst.reloc.exp.X_add_number > 3,
   10286 		      _("shift out of range"));
   10287 	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
   10288 	}
   10289       inst.reloc.type = BFD_RELOC_UNUSED;
   10290     }
   10291   else if (inst.operands[i].preind)
   10292     {
   10293       constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
   10294       constraint (is_t && inst.operands[i].writeback,
   10295 		  _("cannot use writeback with this instruction"));
   10296       constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
   10297 		  BAD_PC_ADDRESSING);
   10298 
   10299       if (is_d)
   10300 	{
   10301 	  inst.instruction |= 0x01000000;
   10302 	  if (inst.operands[i].writeback)
   10303 	    inst.instruction |= 0x00200000;
   10304 	}
   10305       else
   10306 	{
   10307 	  inst.instruction |= 0x00000c00;
   10308 	  if (inst.operands[i].writeback)
   10309 	    inst.instruction |= 0x00000100;
   10310 	}
   10311       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   10312     }
   10313   else if (inst.operands[i].postind)
   10314     {
   10315       gas_assert (inst.operands[i].writeback);
   10316       constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
   10317       constraint (is_t, _("cannot use post-indexing with this instruction"));
   10318 
   10319       if (is_d)
   10320 	inst.instruction |= 0x00200000;
   10321       else
   10322 	inst.instruction |= 0x00000900;
   10323       inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
   10324     }
   10325   else /* unindexed - only for coprocessor */
   10326     inst.error = _("instruction does not accept unindexed addressing");
   10327 }
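
          /* A worked example of the pre-indexed path above (operands chosen
             for illustration): for something like LDR r0, [r1, #4]!, the
             preind branch ORs in 0x00000c00 and then 0x00000100 for the
             writeback, i.e. bit 11 plus the P and W bits of the Thumb32
             immediate form; the U bit and the offset itself are filled in
             later by the BFD_RELOC_ARM_T32_OFFSET_IMM fixup.  */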
   10328 
   10329 /* Table of Thumb instructions which exist in both 16- and 32-bit
   10330    encodings (the latter only in post-V6T2 cores).  The index is the
   10331    value used in the insns table below.  When there is more than one
   10332    possible 16-bit encoding for the instruction, this table always
   10333    holds variant (1).
   10334    Also contains several pseudo-instructions used during relaxation.  */
   10335 #define T16_32_TAB				\
   10336   X(_adc,   4140, eb400000),			\
   10337   X(_adcs,  4140, eb500000),			\
   10338   X(_add,   1c00, eb000000),			\
   10339   X(_adds,  1c00, eb100000),			\
   10340   X(_addi,  0000, f1000000),			\
   10341   X(_addis, 0000, f1100000),			\
   10342   X(_add_pc,000f, f20f0000),			\
   10343   X(_add_sp,000d, f10d0000),			\
   10344   X(_adr,   000f, f20f0000),			\
   10345   X(_and,   4000, ea000000),			\
   10346   X(_ands,  4000, ea100000),			\
   10347   X(_asr,   1000, fa40f000),			\
   10348   X(_asrs,  1000, fa50f000),			\
   10349   X(_b,     e000, f000b000),			\
   10350   X(_bcond, d000, f0008000),			\
   10351   X(_bic,   4380, ea200000),			\
   10352   X(_bics,  4380, ea300000),			\
   10353   X(_cmn,   42c0, eb100f00),			\
   10354   X(_cmp,   2800, ebb00f00),			\
   10355   X(_cpsie, b660, f3af8400),			\
   10356   X(_cpsid, b670, f3af8600),			\
   10357   X(_cpy,   4600, ea4f0000),			\
   10358   X(_dec_sp,80dd, f1ad0d00),			\
   10359   X(_eor,   4040, ea800000),			\
   10360   X(_eors,  4040, ea900000),			\
   10361   X(_inc_sp,00dd, f10d0d00),			\
   10362   X(_ldmia, c800, e8900000),			\
   10363   X(_ldr,   6800, f8500000),			\
   10364   X(_ldrb,  7800, f8100000),			\
   10365   X(_ldrh,  8800, f8300000),			\
   10366   X(_ldrsb, 5600, f9100000),			\
   10367   X(_ldrsh, 5e00, f9300000),			\
   10368   X(_ldr_pc,4800, f85f0000),			\
   10369   X(_ldr_pc2,4800, f85f0000),			\
   10370   X(_ldr_sp,9800, f85d0000),			\
   10371   X(_lsl,   0000, fa00f000),			\
   10372   X(_lsls,  0000, fa10f000),			\
   10373   X(_lsr,   0800, fa20f000),			\
   10374   X(_lsrs,  0800, fa30f000),			\
   10375   X(_mov,   2000, ea4f0000),			\
   10376   X(_movs,  2000, ea5f0000),			\
   10377   X(_mul,   4340, fb00f000),                     \
   10378   X(_muls,  4340, ffffffff), /* no 32b muls */	\
   10379   X(_mvn,   43c0, ea6f0000),			\
   10380   X(_mvns,  43c0, ea7f0000),			\
   10381   X(_neg,   4240, f1c00000), /* rsb #0 */	\
   10382   X(_negs,  4240, f1d00000), /* rsbs #0 */	\
   10383   X(_orr,   4300, ea400000),			\
   10384   X(_orrs,  4300, ea500000),			\
   10385   X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
   10386   X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
   10387   X(_rev,   ba00, fa90f080),			\
   10388   X(_rev16, ba40, fa90f090),			\
   10389   X(_revsh, bac0, fa90f0b0),			\
   10390   X(_ror,   41c0, fa60f000),			\
   10391   X(_rors,  41c0, fa70f000),			\
   10392   X(_sbc,   4180, eb600000),			\
   10393   X(_sbcs,  4180, eb700000),			\
   10394   X(_stmia, c000, e8800000),			\
   10395   X(_str,   6000, f8400000),			\
   10396   X(_strb,  7000, f8000000),			\
   10397   X(_strh,  8000, f8200000),			\
   10398   X(_str_sp,9000, f84d0000),			\
   10399   X(_sub,   1e00, eba00000),			\
   10400   X(_subs,  1e00, ebb00000),			\
   10401   X(_subi,  8000, f1a00000),			\
   10402   X(_subis, 8000, f1b00000),			\
   10403   X(_sxtb,  b240, fa4ff080),			\
   10404   X(_sxth,  b200, fa0ff080),			\
   10405   X(_tst,   4200, ea100f00),			\
   10406   X(_uxtb,  b2c0, fa5ff080),			\
   10407   X(_uxth,  b280, fa1ff080),			\
   10408   X(_nop,   bf00, f3af8000),			\
   10409   X(_yield, bf10, f3af8001),			\
   10410   X(_wfe,   bf20, f3af8002),			\
   10411   X(_wfi,   bf30, f3af8003),			\
   10412   X(_sev,   bf40, f3af8004),                    \
   10413   X(_sevl,  bf50, f3af8005),			\
   10414   X(_udf,   de00, f7f0a000)
   10415 
   10416 /* To catch errors in encoding functions, the codes are all offset by
   10417    0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   10418    as 16-bit instructions.  */
   10419 #define X(a,b,c) T_MNEM##a
   10420 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
   10421 #undef X
   10422 
   10423 #define X(a,b,c) 0x##b
   10424 static const unsigned short thumb_op16[] = { T16_32_TAB };
   10425 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
   10426 #undef X
   10427 
   10428 #define X(a,b,c) 0x##c
   10429 static const unsigned int thumb_op32[] = { T16_32_TAB };
   10430 #define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
   10431 #define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
   10432 #undef X
   10433 #undef T16_32_TAB
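
          /* A worked expansion of the table above, for illustration: the
             first row, X(_adc, 4140, eb400000), becomes T_MNEM_adc == 0xF800
             in the enum (the first value after T16_32_OFFSET), 0x4140 as
             thumb_op16[0] and 0xeb400000 as thumb_op32[0], so
             THUMB_OP16 (T_MNEM_adc) and THUMB_OP32 (T_MNEM_adc) index
             straight back to that row, and THUMB_SETS_FLAGS (T_MNEM_adc) is
             false because bit 20 of 0xeb400000 is clear.  */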
   10434 
   10435 /* Thumb instruction encoders, in alphabetical order.  */
   10436 
   10437 /* ADDW or SUBW.  */
   10438 
   10439 static void
   10440 do_t_add_sub_w (void)
   10441 {
   10442   int Rd, Rn;
   10443 
   10444   Rd = inst.operands[0].reg;
   10445   Rn = inst.operands[1].reg;
   10446 
   10447   /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
   10448      is the SP-{plus,minus}-immediate form of the instruction.  */
   10449   if (Rn == REG_SP)
   10450     constraint (Rd == REG_PC, BAD_PC);
   10451   else
   10452     reject_bad_reg (Rd);
   10453 
   10454   inst.instruction |= (Rn << 16) | (Rd << 8);
   10455   inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
   10456 }
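
          /* A worked example (operands chosen for illustration): ADDW r1, r2,
             #imm puts Rn == 2 in bits 19:16 and Rd == 1 in bits 11:8; the
             12-bit immediate itself is left to the BFD_RELOC_ARM_T32_IMM12
             fixup.  */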
   10457 
   10458 /* Parse an add or subtract instruction.  We get here with inst.instruction
   10459    equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */
   10460 
   10461 static void
   10462 do_t_add_sub (void)
   10463 {
   10464   int Rd, Rs, Rn;
   10465 
   10466   Rd = inst.operands[0].reg;
   10467   Rs = (inst.operands[1].present
   10468 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10469 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10470 
   10471   if (Rd == REG_PC)
   10472     set_it_insn_type_last ();
   10473 
   10474   if (unified_syntax)
   10475     {
   10476       bfd_boolean flags;
   10477       bfd_boolean narrow;
   10478       int opcode;
   10479 
   10480       flags = (inst.instruction == T_MNEM_adds
   10481 	       || inst.instruction == T_MNEM_subs);
   10482       if (flags)
   10483 	narrow = !in_it_block ();
   10484       else
   10485 	narrow = in_it_block ();
   10486       if (!inst.operands[2].isreg)
   10487 	{
   10488 	  int add;
   10489 
   10490 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
   10491 
   10492 	  add = (inst.instruction == T_MNEM_add
   10493 		 || inst.instruction == T_MNEM_adds);
   10494 	  opcode = 0;
   10495 	  if (inst.size_req != 4)
   10496 	    {
   10497 	      /* Attempt to use a narrow opcode, with relaxation if
   10498 		 appropriate.  */
   10499 	      if (Rd == REG_SP && Rs == REG_SP && !flags)
   10500 		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
   10501 	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
   10502 		opcode = T_MNEM_add_sp;
   10503 	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
   10504 		opcode = T_MNEM_add_pc;
   10505 	      else if (Rd <= 7 && Rs <= 7 && narrow)
   10506 		{
   10507 		  if (flags)
   10508 		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
   10509 		  else
   10510 		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
   10511 		}
   10512 	      if (opcode)
   10513 		{
   10514 		  inst.instruction = THUMB_OP16(opcode);
   10515 		  inst.instruction |= (Rd << 4) | Rs;
   10516 		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   10517 		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
   10518 		  {
   10519 		    if (inst.size_req == 2)
   10520 		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10521 		    else
   10522 		      inst.relax = opcode;
   10523 		  }
   10524 		}
   10525 	      else
   10526 		constraint (inst.size_req == 2, BAD_HIREG);
   10527 	    }
   10528 	  if (inst.size_req == 4
   10529 	      || (inst.size_req != 2 && !opcode))
   10530 	    {
   10531 	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   10532 			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   10533 			  THUMB1_RELOC_ONLY);
   10534 	      if (Rd == REG_PC)
   10535 		{
   10536 		  constraint (add, BAD_PC);
   10537 		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
   10538 			     _("only SUBS PC, LR, #const allowed"));
   10539 		  constraint (inst.reloc.exp.X_op != O_constant,
   10540 			      _("expression too complex"));
   10541 		  constraint (inst.reloc.exp.X_add_number < 0
   10542 			      || inst.reloc.exp.X_add_number > 0xff,
   10543 			     _("immediate value out of range"));
   10544 		  inst.instruction = T2_SUBS_PC_LR
   10545 				     | inst.reloc.exp.X_add_number;
   10546 		  inst.reloc.type = BFD_RELOC_UNUSED;
   10547 		  return;
   10548 		}
   10549 	      else if (Rs == REG_PC)
   10550 		{
   10551 		  /* Always use addw/subw.  */
   10552 		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
   10553 		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
   10554 		}
   10555 	      else
   10556 		{
   10557 		  inst.instruction = THUMB_OP32 (inst.instruction);
   10558 		  inst.instruction = (inst.instruction & 0xe1ffffff)
   10559 				     | 0x10000000;
   10560 		  if (flags)
   10561 		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10562 		  else
   10563 		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
   10564 		}
   10565 	      inst.instruction |= Rd << 8;
   10566 	      inst.instruction |= Rs << 16;
   10567 	    }
   10568 	}
   10569       else
   10570 	{
   10571 	  unsigned int value = inst.reloc.exp.X_add_number;
   10572 	  unsigned int shift = inst.operands[2].shift_kind;
   10573 
   10574 	  Rn = inst.operands[2].reg;
   10575 	  /* See if we can do this with a 16-bit instruction.  */
   10576 	  if (!inst.operands[2].shifted && inst.size_req != 4)
   10577 	    {
   10578 	      if (Rd > 7 || Rs > 7 || Rn > 7)
   10579 		narrow = FALSE;
   10580 
   10581 	      if (narrow)
   10582 		{
   10583 		  inst.instruction = ((inst.instruction == T_MNEM_adds
   10584 				       || inst.instruction == T_MNEM_add)
   10585 				      ? T_OPCODE_ADD_R3
   10586 				      : T_OPCODE_SUB_R3);
   10587 		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
   10588 		  return;
   10589 		}
   10590 
   10591 	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
   10592 		{
   10593 		  /* Thumb-1 cores (except v6-M) require at least one high
    10594 		     register in a narrow non-flag-setting add.  */
   10595 		  if (Rd > 7 || Rn > 7
   10596 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
   10597 		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
   10598 		    {
   10599 		      if (Rd == Rn)
   10600 			{
   10601 			  Rn = Rs;
   10602 			  Rs = Rd;
   10603 			}
   10604 		      inst.instruction = T_OPCODE_ADD_HI;
   10605 		      inst.instruction |= (Rd & 8) << 4;
   10606 		      inst.instruction |= (Rd & 7);
   10607 		      inst.instruction |= Rn << 3;
   10608 		      return;
   10609 		    }
   10610 		}
   10611 	    }
   10612 
   10613 	  constraint (Rd == REG_PC, BAD_PC);
   10614 	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
   10615 	  constraint (Rs == REG_PC, BAD_PC);
   10616 	  reject_bad_reg (Rn);
   10617 
   10618 	  /* If we get here, it can't be done in 16 bits.  */
   10619 	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
   10620 		      _("shift must be constant"));
   10621 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10622 	  inst.instruction |= Rd << 8;
   10623 	  inst.instruction |= Rs << 16;
   10624 	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
   10625 		      _("shift value over 3 not allowed in thumb mode"));
   10626 	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
   10627 		      _("only LSL shift allowed in thumb mode"));
   10628 	  encode_thumb32_shifted_operand (2);
   10629 	}
   10630     }
   10631   else
   10632     {
   10633       constraint (inst.instruction == T_MNEM_adds
   10634 		  || inst.instruction == T_MNEM_subs,
   10635 		  BAD_THUMB32);
   10636 
   10637       if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
   10638 	{
   10639 	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
   10640 		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
   10641 		      BAD_HIREG);
   10642 
   10643 	  inst.instruction = (inst.instruction == T_MNEM_add
   10644 			      ? 0x0000 : 0x8000);
   10645 	  inst.instruction |= (Rd << 4) | Rs;
   10646 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10647 	  return;
   10648 	}
   10649 
   10650       Rn = inst.operands[2].reg;
   10651       constraint (inst.operands[2].shifted, _("unshifted register required"));
   10652 
   10653       /* We now have Rd, Rs, and Rn set to registers.  */
   10654       if (Rd > 7 || Rs > 7 || Rn > 7)
   10655 	{
   10656 	  /* Can't do this for SUB.	 */
   10657 	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
   10658 	  inst.instruction = T_OPCODE_ADD_HI;
   10659 	  inst.instruction |= (Rd & 8) << 4;
   10660 	  inst.instruction |= (Rd & 7);
   10661 	  if (Rs == Rd)
   10662 	    inst.instruction |= Rn << 3;
   10663 	  else if (Rn == Rd)
   10664 	    inst.instruction |= Rs << 3;
   10665 	  else
   10666 	    constraint (1, _("dest must overlap one source register"));
   10667 	}
   10668       else
   10669 	{
   10670 	  inst.instruction = (inst.instruction == T_MNEM_add
   10671 			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
   10672 	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
   10673 	}
   10674     }
   10675 }
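
          /* A worked example of the narrow/wide selection above (operands
             chosen for illustration): "adds r1, r2, r3" outside an IT block
             is flag setting, so the narrow path is taken and the result is
             T_OPCODE_ADD_R3 | 1 | (2 << 3) | (3 << 6) == 0x18d1; plain
             "add r1, r2, r3" cannot use that 16-bit encoding outside an IT
             block (it would set the flags), so it falls through to the
             32-bit THUMB_OP32 path instead.  */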
   10676 
   10677 static void
   10678 do_t_adr (void)
   10679 {
   10680   unsigned Rd;
   10681 
   10682   Rd = inst.operands[0].reg;
   10683   reject_bad_reg (Rd);
   10684 
   10685   if (unified_syntax && inst.size_req == 0 && Rd <= 7)
   10686     {
   10687       /* Defer to section relaxation.  */
   10688       inst.relax = inst.instruction;
   10689       inst.instruction = THUMB_OP16 (inst.instruction);
   10690       inst.instruction |= Rd << 4;
   10691     }
   10692   else if (unified_syntax && inst.size_req != 2)
   10693     {
   10694       /* Generate a 32-bit opcode.  */
   10695       inst.instruction = THUMB_OP32 (inst.instruction);
   10696       inst.instruction |= Rd << 8;
   10697       inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
   10698       inst.reloc.pc_rel = 1;
   10699     }
   10700   else
   10701     {
   10702       /* Generate a 16-bit opcode.  */
   10703       inst.instruction = THUMB_OP16 (inst.instruction);
   10704       inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
   10705       inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
   10706       inst.reloc.pc_rel = 1;
   10707 
   10708       inst.instruction |= Rd << 4;
   10709     }
   10710 }
   10711 
   10712 /* Arithmetic instructions for which there is just one 16-bit
   10713    instruction encoding, and it allows only two low registers.
   10714    For maximal compatibility with ARM syntax, we allow three register
   10715    operands even when Thumb-32 instructions are not available, as long
   10716    as the first two are identical.  For instance, both "sbc r0,r1" and
   10717    "sbc r0,r0,r1" are allowed.  */
   10718 static void
   10719 do_t_arit3 (void)
   10720 {
   10721   int Rd, Rs, Rn;
   10722 
   10723   Rd = inst.operands[0].reg;
   10724   Rs = (inst.operands[1].present
   10725 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10726 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10727   Rn = inst.operands[2].reg;
   10728 
   10729   reject_bad_reg (Rd);
   10730   reject_bad_reg (Rs);
   10731   if (inst.operands[2].isreg)
   10732     reject_bad_reg (Rn);
   10733 
   10734   if (unified_syntax)
   10735     {
   10736       if (!inst.operands[2].isreg)
   10737 	{
   10738 	  /* For an immediate, we always generate a 32-bit opcode;
   10739 	     section relaxation will shrink it later if possible.  */
   10740 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10741 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   10742 	  inst.instruction |= Rd << 8;
   10743 	  inst.instruction |= Rs << 16;
   10744 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10745 	}
   10746       else
   10747 	{
   10748 	  bfd_boolean narrow;
   10749 
   10750 	  /* See if we can do this with a 16-bit instruction.  */
   10751 	  if (THUMB_SETS_FLAGS (inst.instruction))
   10752 	    narrow = !in_it_block ();
   10753 	  else
   10754 	    narrow = in_it_block ();
   10755 
   10756 	  if (Rd > 7 || Rn > 7 || Rs > 7)
   10757 	    narrow = FALSE;
   10758 	  if (inst.operands[2].shifted)
   10759 	    narrow = FALSE;
   10760 	  if (inst.size_req == 4)
   10761 	    narrow = FALSE;
   10762 
   10763 	  if (narrow
   10764 	      && Rd == Rs)
   10765 	    {
   10766 	      inst.instruction = THUMB_OP16 (inst.instruction);
   10767 	      inst.instruction |= Rd;
   10768 	      inst.instruction |= Rn << 3;
   10769 	      return;
   10770 	    }
   10771 
   10772 	  /* If we get here, it can't be done in 16 bits.  */
   10773 	  constraint (inst.operands[2].shifted
   10774 		      && inst.operands[2].immisreg,
   10775 		      _("shift must be constant"));
   10776 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10777 	  inst.instruction |= Rd << 8;
   10778 	  inst.instruction |= Rs << 16;
   10779 	  encode_thumb32_shifted_operand (2);
   10780 	}
   10781     }
   10782   else
   10783     {
   10784       /* On its face this is a lie - the instruction does set the
   10785 	 flags.  However, the only supported mnemonic in this mode
   10786 	 says it doesn't.  */
   10787       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   10788 
   10789       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
   10790 		  _("unshifted register required"));
   10791       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
   10792       constraint (Rd != Rs,
   10793 		  _("dest and source1 must be the same register"));
   10794 
   10795       inst.instruction = THUMB_OP16 (inst.instruction);
   10796       inst.instruction |= Rd;
   10797       inst.instruction |= Rn << 3;
   10798     }
   10799 }
   10800 
   10801 /* Similarly, but for instructions where the arithmetic operation is
   10802    commutative, so we can allow either of them to be different from
   10803    the destination operand in a 16-bit instruction.  For instance, all
   10804    three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   10805    accepted.  */
   10806 static void
   10807 do_t_arit3c (void)
   10808 {
   10809   int Rd, Rs, Rn;
   10810 
   10811   Rd = inst.operands[0].reg;
   10812   Rs = (inst.operands[1].present
   10813 	? inst.operands[1].reg    /* Rd, Rs, foo */
   10814 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   10815   Rn = inst.operands[2].reg;
   10816 
   10817   reject_bad_reg (Rd);
   10818   reject_bad_reg (Rs);
   10819   if (inst.operands[2].isreg)
   10820     reject_bad_reg (Rn);
   10821 
   10822   if (unified_syntax)
   10823     {
   10824       if (!inst.operands[2].isreg)
   10825 	{
   10826 	  /* For an immediate, we always generate a 32-bit opcode;
   10827 	     section relaxation will shrink it later if possible.  */
   10828 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10829 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   10830 	  inst.instruction |= Rd << 8;
   10831 	  inst.instruction |= Rs << 16;
   10832 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   10833 	}
   10834       else
   10835 	{
   10836 	  bfd_boolean narrow;
   10837 
   10838 	  /* See if we can do this with a 16-bit instruction.  */
   10839 	  if (THUMB_SETS_FLAGS (inst.instruction))
   10840 	    narrow = !in_it_block ();
   10841 	  else
   10842 	    narrow = in_it_block ();
   10843 
   10844 	  if (Rd > 7 || Rn > 7 || Rs > 7)
   10845 	    narrow = FALSE;
   10846 	  if (inst.operands[2].shifted)
   10847 	    narrow = FALSE;
   10848 	  if (inst.size_req == 4)
   10849 	    narrow = FALSE;
   10850 
   10851 	  if (narrow)
   10852 	    {
   10853 	      if (Rd == Rs)
   10854 		{
   10855 		  inst.instruction = THUMB_OP16 (inst.instruction);
   10856 		  inst.instruction |= Rd;
   10857 		  inst.instruction |= Rn << 3;
   10858 		  return;
   10859 		}
   10860 	      if (Rd == Rn)
   10861 		{
   10862 		  inst.instruction = THUMB_OP16 (inst.instruction);
   10863 		  inst.instruction |= Rd;
   10864 		  inst.instruction |= Rs << 3;
   10865 		  return;
   10866 		}
   10867 	    }
   10868 
   10869 	  /* If we get here, it can't be done in 16 bits.  */
   10870 	  constraint (inst.operands[2].shifted
   10871 		      && inst.operands[2].immisreg,
   10872 		      _("shift must be constant"));
   10873 	  inst.instruction = THUMB_OP32 (inst.instruction);
   10874 	  inst.instruction |= Rd << 8;
   10875 	  inst.instruction |= Rs << 16;
   10876 	  encode_thumb32_shifted_operand (2);
   10877 	}
   10878     }
   10879   else
   10880     {
   10881       /* On its face this is a lie - the instruction does set the
   10882 	 flags.  However, the only supported mnemonic in this mode
   10883 	 says it doesn't.  */
   10884       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   10885 
   10886       constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
   10887 		  _("unshifted register required"));
   10888       constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
   10889 
   10890       inst.instruction = THUMB_OP16 (inst.instruction);
   10891       inst.instruction |= Rd;
   10892 
   10893       if (Rd == Rs)
   10894 	inst.instruction |= Rn << 3;
   10895       else if (Rd == Rn)
   10896 	inst.instruction |= Rs << 3;
   10897       else
   10898 	constraint (1, _("dest must overlap one source register"));
   10899     }
   10900 }
   10901 
   10902 static void
   10903 do_t_bfc (void)
   10904 {
   10905   unsigned Rd;
   10906   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
   10907   constraint (msb > 32, _("bit-field extends past end of register"));
   10908   /* The instruction encoding stores the LSB and MSB,
   10909      not the LSB and width.  */
   10910   Rd = inst.operands[0].reg;
   10911   reject_bad_reg (Rd);
   10912   inst.instruction |= Rd << 8;
   10913   inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
   10914   inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
   10915   inst.instruction |= msb - 1;
   10916 }
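
          /* A worked example (operands chosen for illustration): BFC r0, #8,
             #4 gives msb == 12, so the encoded msb field is 11; the lsb of 8
             splits into imm3 == 2 via (8 & 0x1c) << 10 == 0x2000 (bits 14:12)
             and imm2 == 0 in bits 7:6.  */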
   10917 
   10918 static void
   10919 do_t_bfi (void)
   10920 {
   10921   int Rd, Rn;
   10922   unsigned int msb;
   10923 
   10924   Rd = inst.operands[0].reg;
   10925   reject_bad_reg (Rd);
   10926 
   10927   /* #0 in second position is alternative syntax for bfc, which is
   10928      the same instruction but with REG_PC in the Rm field.  */
   10929   if (!inst.operands[1].isreg)
   10930     Rn = REG_PC;
   10931   else
   10932     {
   10933       Rn = inst.operands[1].reg;
   10934       reject_bad_reg (Rn);
   10935     }
   10936 
   10937   msb = inst.operands[2].imm + inst.operands[3].imm;
   10938   constraint (msb > 32, _("bit-field extends past end of register"));
   10939   /* The instruction encoding stores the LSB and MSB,
   10940      not the LSB and width.  */
   10941   inst.instruction |= Rd << 8;
   10942   inst.instruction |= Rn << 16;
   10943   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
   10944   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
   10945   inst.instruction |= msb - 1;
   10946 }
   10947 
   10948 static void
   10949 do_t_bfx (void)
   10950 {
   10951   unsigned Rd, Rn;
   10952 
   10953   Rd = inst.operands[0].reg;
   10954   Rn = inst.operands[1].reg;
   10955 
   10956   reject_bad_reg (Rd);
   10957   reject_bad_reg (Rn);
   10958 
   10959   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
   10960 	      _("bit-field extends past end of register"));
   10961   inst.instruction |= Rd << 8;
   10962   inst.instruction |= Rn << 16;
   10963   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
   10964   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
   10965   inst.instruction |= inst.operands[3].imm - 1;
   10966 }
   10967 
   10968 /* ARM V5 Thumb BLX (argument parse)
   10969 	BLX <target_addr>	which is BLX(1)
   10970 	BLX <Rm>		which is BLX(2)
   10971    Unfortunately, there are two different opcodes for this mnemonic.
   10972    So, the insns[].value is not used, and the code here zaps values
   10973 	into inst.instruction.
   10974 
   10975    ??? How to take advantage of the additional two bits of displacement
   10976    available in Thumb32 mode?  Need new relocation?  */
   10977 
   10978 static void
   10979 do_t_blx (void)
   10980 {
   10981   set_it_insn_type_last ();
   10982 
   10983   if (inst.operands[0].isreg)
   10984     {
   10985       constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   10986       /* We have a register, so this is BLX(2).  */
   10987       inst.instruction |= inst.operands[0].reg << 3;
   10988     }
   10989   else
   10990     {
   10991       /* No register.  This must be BLX(1).  */
   10992       inst.instruction = 0xf000e800;
   10993       encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
   10994     }
   10995 }
   10996 
   10997 static void
   10998 do_t_branch (void)
   10999 {
   11000   int opcode;
   11001   int cond;
   11002   bfd_reloc_code_real_type reloc;
   11003 
   11004   cond = inst.cond;
   11005   set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
   11006 
   11007   if (in_it_block ())
   11008     {
   11009       /* Conditional branches inside IT blocks are encoded as unconditional
   11010 	 branches.  */
   11011       cond = COND_ALWAYS;
   11012     }
   11013   else
   11014     cond = inst.cond;
   11015 
   11016   if (cond != COND_ALWAYS)
   11017     opcode = T_MNEM_bcond;
   11018   else
   11019     opcode = inst.instruction;
   11020 
   11021   if (unified_syntax
   11022       && (inst.size_req == 4
   11023 	  || (inst.size_req != 2
   11024 	      && (inst.operands[0].hasreloc
   11025 		  || inst.reloc.exp.X_op == O_constant))))
   11026     {
   11027       inst.instruction = THUMB_OP32(opcode);
   11028       if (cond == COND_ALWAYS)
   11029 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
   11030       else
   11031 	{
   11032 	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
   11033 		      _("selected architecture does not support "
   11034 			"wide conditional branch instruction"));
   11035 
   11036 	  gas_assert (cond != 0xF);
   11037 	  inst.instruction |= cond << 22;
   11038 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
   11039 	}
   11040     }
   11041   else
   11042     {
   11043       inst.instruction = THUMB_OP16(opcode);
   11044       if (cond == COND_ALWAYS)
   11045 	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
   11046       else
   11047 	{
   11048 	  inst.instruction |= cond << 8;
   11049 	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
   11050 	}
   11051       /* Allow section relaxation.  */
   11052       if (unified_syntax && inst.size_req != 2)
   11053 	inst.relax = opcode;
   11054     }
   11055   inst.reloc.type = reloc;
   11056   inst.reloc.pc_rel = 1;
   11057 }
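
          /* A worked example of the wide conditional case above (condition
             chosen for illustration): "bne.w label" selects
             THUMB_OP32 (T_MNEM_bcond) == 0xf0008000 and ORs in cond NE (0x1)
             shifted left by 22, giving 0xf0408000; the displacement is left
             to the BFD_RELOC_THUMB_PCREL_BRANCH20 fixup.  */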
   11058 
   11059 /* Actually do the work for Thumb state bkpt and hlt.  The only difference
   11060    between the two is the maximum immediate allowed - which is passed in
   11061    RANGE.  */
   11062 static void
   11063 do_t_bkpt_hlt1 (int range)
   11064 {
   11065   constraint (inst.cond != COND_ALWAYS,
   11066 	      _("instruction is always unconditional"));
   11067   if (inst.operands[0].present)
   11068     {
   11069       constraint (inst.operands[0].imm > range,
   11070 		  _("immediate value out of range"));
   11071       inst.instruction |= inst.operands[0].imm;
   11072     }
   11073 
   11074   set_it_insn_type (NEUTRAL_IT_INSN);
   11075 }
   11076 
   11077 static void
   11078 do_t_hlt (void)
   11079 {
   11080   do_t_bkpt_hlt1 (63);
   11081 }
   11082 
   11083 static void
   11084 do_t_bkpt (void)
   11085 {
   11086   do_t_bkpt_hlt1 (255);
   11087 }
   11088 
   11089 static void
   11090 do_t_branch23 (void)
   11091 {
   11092   set_it_insn_type_last ();
   11093   encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
   11094 
   11095   /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
   11096      this file.  We used to simply ignore the PLT reloc type here --
   11097      the branch encoding is now needed to deal with TLSCALL relocs.
   11098      So if we see a PLT reloc now, put it back to how it used to be to
   11099      keep the preexisting behaviour.  */
   11100   if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
   11101     inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
   11102 
   11103 #if defined(OBJ_COFF)
   11104   /* If the destination of the branch is a defined symbol which does not have
   11105      the THUMB_FUNC attribute, then we must be calling a function which has
   11106      the (interfacearm) attribute.  We look for the Thumb entry point to that
   11107      function and change the branch to refer to that function instead.	*/
   11108   if (	 inst.reloc.exp.X_op == O_symbol
   11109       && inst.reloc.exp.X_add_symbol != NULL
   11110       && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
   11111       && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
   11112     inst.reloc.exp.X_add_symbol =
   11113       find_real_start (inst.reloc.exp.X_add_symbol);
   11114 #endif
   11115 }
   11116 
   11117 static void
   11118 do_t_bx (void)
   11119 {
   11120   set_it_insn_type_last ();
   11121   inst.instruction |= inst.operands[0].reg << 3;
   11122   /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
   11123      should cause the alignment to be checked once it is known.	 This is
   11124      because BX PC only works if the instruction is word aligned.  */
   11125 }
   11126 
   11127 static void
   11128 do_t_bxj (void)
   11129 {
   11130   int Rm;
   11131 
   11132   set_it_insn_type_last ();
   11133   Rm = inst.operands[0].reg;
   11134   reject_bad_reg (Rm);
   11135   inst.instruction |= Rm << 16;
   11136 }
   11137 
   11138 static void
   11139 do_t_clz (void)
   11140 {
   11141   unsigned Rd;
   11142   unsigned Rm;
   11143 
   11144   Rd = inst.operands[0].reg;
   11145   Rm = inst.operands[1].reg;
   11146 
   11147   reject_bad_reg (Rd);
   11148   reject_bad_reg (Rm);
   11149 
   11150   inst.instruction |= Rd << 8;
   11151   inst.instruction |= Rm << 16;
   11152   inst.instruction |= Rm;
   11153 }
   11154 
   11155 static void
   11156 do_t_cps (void)
   11157 {
   11158   set_it_insn_type (OUTSIDE_IT_INSN);
   11159   inst.instruction |= inst.operands[0].imm;
   11160 }
   11161 
   11162 static void
   11163 do_t_cpsi (void)
   11164 {
   11165   set_it_insn_type (OUTSIDE_IT_INSN);
   11166   if (unified_syntax
   11167       && (inst.operands[1].present || inst.size_req == 4)
   11168       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
   11169     {
   11170       unsigned int imod = (inst.instruction & 0x0030) >> 4;
   11171       inst.instruction = 0xf3af8000;
   11172       inst.instruction |= imod << 9;
   11173       inst.instruction |= inst.operands[0].imm << 5;
   11174       if (inst.operands[1].present)
   11175 	inst.instruction |= 0x100 | inst.operands[1].imm;
   11176     }
   11177   else
   11178     {
   11179       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
   11180 		  && (inst.operands[0].imm & 4),
   11181 		  _("selected processor does not support 'A' form "
   11182 		    "of this instruction"));
   11183       constraint (inst.operands[1].present || inst.size_req == 4,
   11184 		  _("Thumb does not support the 2-argument "
   11185 		    "form of this instruction"));
   11186       inst.instruction |= inst.operands[0].imm;
   11187     }
   11188 }
   11189 
   11190 /* THUMB CPY instruction (argument parse).  */
   11191 
   11192 static void
   11193 do_t_cpy (void)
   11194 {
   11195   if (inst.size_req == 4)
   11196     {
   11197       inst.instruction = THUMB_OP32 (T_MNEM_mov);
   11198       inst.instruction |= inst.operands[0].reg << 8;
   11199       inst.instruction |= inst.operands[1].reg;
   11200     }
   11201   else
   11202     {
   11203       inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
   11204       inst.instruction |= (inst.operands[0].reg & 0x7);
   11205       inst.instruction |= inst.operands[1].reg << 3;
   11206     }
   11207 }
   11208 
   11209 static void
   11210 do_t_cbz (void)
   11211 {
   11212   set_it_insn_type (OUTSIDE_IT_INSN);
   11213   constraint (inst.operands[0].reg > 7, BAD_HIREG);
   11214   inst.instruction |= inst.operands[0].reg;
   11215   inst.reloc.pc_rel = 1;
   11216   inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
   11217 }
   11218 
   11219 static void
   11220 do_t_dbg (void)
   11221 {
   11222   inst.instruction |= inst.operands[0].imm;
   11223 }
   11224 
   11225 static void
   11226 do_t_div (void)
   11227 {
   11228   unsigned Rd, Rn, Rm;
   11229 
   11230   Rd = inst.operands[0].reg;
   11231   Rn = (inst.operands[1].present
   11232 	? inst.operands[1].reg : Rd);
   11233   Rm = inst.operands[2].reg;
   11234 
   11235   reject_bad_reg (Rd);
   11236   reject_bad_reg (Rn);
   11237   reject_bad_reg (Rm);
   11238 
   11239   inst.instruction |= Rd << 8;
   11240   inst.instruction |= Rn << 16;
   11241   inst.instruction |= Rm;
   11242 }
   11243 
   11244 static void
   11245 do_t_hint (void)
   11246 {
   11247   if (unified_syntax && inst.size_req == 4)
   11248     inst.instruction = THUMB_OP32 (inst.instruction);
   11249   else
   11250     inst.instruction = THUMB_OP16 (inst.instruction);
   11251 }
   11252 
   11253 static void
   11254 do_t_it (void)
   11255 {
   11256   unsigned int cond = inst.operands[0].imm;
   11257 
   11258   set_it_insn_type (IT_INSN);
   11259   now_it.mask = (inst.instruction & 0xf) | 0x10;
   11260   now_it.cc = cond;
   11261   now_it.warn_deprecated = FALSE;
   11262 
   11263   /* If the condition is a negative condition, invert the mask.  */
   11264   if ((cond & 0x1) == 0x0)
   11265     {
   11266       unsigned int mask = inst.instruction & 0x000f;
   11267 
   11268       if ((mask & 0x7) == 0)
   11269 	{
   11270 	  /* No conversion needed.  */
   11271 	  now_it.block_length = 1;
   11272 	}
   11273       else if ((mask & 0x3) == 0)
   11274 	{
   11275 	  mask ^= 0x8;
   11276 	  now_it.block_length = 2;
   11277 	}
   11278       else if ((mask & 0x1) == 0)
   11279 	{
   11280 	  mask ^= 0xC;
   11281 	  now_it.block_length = 3;
   11282 	}
   11283       else
   11284 	{
   11285 	  mask ^= 0xE;
   11286 	  now_it.block_length = 4;
   11287 	}
   11288 
   11289       inst.instruction &= 0xfff0;
   11290       inst.instruction |= mask;
   11291     }
   11292 
   11293   inst.instruction |= cond << 4;
   11294 }
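
          /* A worked example of the mask inversion above (condition chosen
             for illustration, and assuming the opcode table encodes the
             T/E pattern for an odd first condition, which is why the
             inversion only runs for even ones): for "ite eq" the template
             mask is 0b0100; EQ is even and (mask & 0x3) == 0, so the x bit
             is flipped, 0b0100 ^ 0b1000 == 0b1100, and block_length is
             recorded as 2.  */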
   11295 
   11296 /* Helper function used for both push/pop and ldm/stm.  */
   11297 static void
   11298 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
   11299 {
   11300   bfd_boolean load;
   11301 
   11302   load = (inst.instruction & (1 << 20)) != 0;
   11303 
   11304   if (mask & (1 << 13))
   11305     inst.error =  _("SP not allowed in register list");
   11306 
   11307   if ((mask & (1 << base)) != 0
   11308       && writeback)
   11309     inst.error = _("having the base register in the register list when "
   11310 		   "using write back is UNPREDICTABLE");
   11311 
   11312   if (load)
   11313     {
   11314       if (mask & (1 << 15))
   11315 	{
   11316 	  if (mask & (1 << 14))
   11317 	    inst.error = _("LR and PC should not both be in register list");
   11318 	  else
   11319 	    set_it_insn_type_last ();
   11320 	}
   11321     }
   11322   else
   11323     {
   11324       if (mask & (1 << 15))
   11325 	inst.error = _("PC not allowed in register list");
   11326     }
   11327 
   11328   if ((mask & (mask - 1)) == 0)
   11329     {
   11330       /* Single register transfers implemented as str/ldr.  */
   11331       if (writeback)
   11332 	{
   11333 	  if (inst.instruction & (1 << 23))
   11334 	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
   11335 	  else
   11336 	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
   11337 	}
   11338       else
   11339 	{
   11340 	  if (inst.instruction & (1 << 23))
   11341 	    inst.instruction = 0x00800000; /* ia -> [base] */
   11342 	  else
   11343 	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
   11344 	}
   11345 
   11346       inst.instruction |= 0xf8400000;
   11347       if (load)
   11348 	inst.instruction |= 0x00100000;
   11349 
   11350       mask = ffs (mask) - 1;
   11351       mask <<= 12;
   11352     }
   11353   else if (writeback)
   11354     inst.instruction |= WRITE_BACK;
   11355 
   11356   inst.instruction |= mask;
   11357   inst.instruction |= base << 16;
   11358 }
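
          /* A worked example of the single-register case above (operands
             chosen for illustration): "ldmia.w r3!, {r7}" has mask == 0x0080,
             so it is rewritten as a post-indexed load: 0x00000b04
             ("[base], #4"), OR 0xf8400000, OR the load bit 0x00100000, with
             ffs (0x0080) - 1 == 7 shifted into bits 15:12 and the base r3
             into bits 19:16, i.e. roughly 0xf8537b04, LDR r7, [r3], #4.  */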
   11359 
   11360 static void
   11361 do_t_ldmstm (void)
   11362 {
   11363   /* This really doesn't seem worth it.  */
   11364   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
   11365 	      _("expression too complex"));
   11366   constraint (inst.operands[1].writeback,
   11367 	      _("Thumb load/store multiple does not support {reglist}^"));
   11368 
   11369   if (unified_syntax)
   11370     {
   11371       bfd_boolean narrow;
   11372       unsigned mask;
   11373 
   11374       narrow = FALSE;
   11375       /* See if we can use a 16-bit instruction.  */
   11376       if (inst.instruction < 0xffff /* not ldmdb/stmdb */
   11377 	  && inst.size_req != 4
   11378 	  && !(inst.operands[1].imm & ~0xff))
   11379 	{
   11380 	  mask = 1 << inst.operands[0].reg;
   11381 
   11382 	  if (inst.operands[0].reg <= 7)
   11383 	    {
   11384 	      if (inst.instruction == T_MNEM_stmia
   11385 		  ? inst.operands[0].writeback
   11386 		  : (inst.operands[0].writeback
   11387 		     == !(inst.operands[1].imm & mask)))
   11388 		{
   11389 		  if (inst.instruction == T_MNEM_stmia
   11390 		      && (inst.operands[1].imm & mask)
   11391 		      && (inst.operands[1].imm & (mask - 1)))
   11392 		    as_warn (_("value stored for r%d is UNKNOWN"),
   11393 			     inst.operands[0].reg);
   11394 
   11395 		  inst.instruction = THUMB_OP16 (inst.instruction);
   11396 		  inst.instruction |= inst.operands[0].reg << 8;
   11397 		  inst.instruction |= inst.operands[1].imm;
   11398 		  narrow = TRUE;
   11399 		}
   11400 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
   11401 		{
    11402 		  /* This means 1 register in the reg list, one of 3 situations:
    11403 		     1. Instruction is stmia, but without writeback.
    11404 		     2. ldmia without writeback, but with Rn not in the
    11405 			reglist.
    11406 		     3. ldmia with writeback, but with Rn in the reglist.
    11407 		     Case 3 is UNPREDICTABLE behaviour, so we handle
    11408 		     cases 1 and 2, which can be converted into a 16-bit
    11409 		     str or ldr.  The SP cases are handled below.  */
   11410 		  unsigned long opcode;
   11411 		  /* First, record an error for Case 3.  */
   11412 		  if (inst.operands[1].imm & mask
   11413 		      && inst.operands[0].writeback)
   11414 		    inst.error =
   11415 			_("having the base register in the register list when "
   11416 			  "using write back is UNPREDICTABLE");
   11417 
   11418 		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
   11419 							     : T_MNEM_ldr);
   11420 		  inst.instruction = THUMB_OP16 (opcode);
   11421 		  inst.instruction |= inst.operands[0].reg << 3;
   11422 		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
   11423 		  narrow = TRUE;
   11424 		}
   11425 	    }
    11426 	  else if (inst.operands[0].reg == REG_SP)
   11427 	    {
   11428 	      if (inst.operands[0].writeback)
   11429 		{
   11430 		  inst.instruction =
   11431 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
   11432 				    ? T_MNEM_push : T_MNEM_pop);
   11433 		  inst.instruction |= inst.operands[1].imm;
   11434 		  narrow = TRUE;
   11435 		}
   11436 	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
   11437 		{
   11438 		  inst.instruction =
   11439 			THUMB_OP16 (inst.instruction == T_MNEM_stmia
   11440 				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
   11441 		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
   11442 		  narrow = TRUE;
   11443 		}
   11444 	    }
   11445 	}
   11446 
   11447       if (!narrow)
   11448 	{
   11449 	  if (inst.instruction < 0xffff)
   11450 	    inst.instruction = THUMB_OP32 (inst.instruction);
   11451 
   11452 	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
   11453 				inst.operands[0].writeback);
   11454 	}
   11455     }
   11456   else
   11457     {
   11458       constraint (inst.operands[0].reg > 7
   11459 		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
   11460       constraint (inst.instruction != T_MNEM_ldmia
   11461 		  && inst.instruction != T_MNEM_stmia,
   11462 		  _("Thumb-2 instruction only valid in unified syntax"));
   11463       if (inst.instruction == T_MNEM_stmia)
   11464 	{
   11465 	  if (!inst.operands[0].writeback)
   11466 	    as_warn (_("this instruction will write back the base register"));
   11467 	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
   11468 	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
   11469 	    as_warn (_("value stored for r%d is UNKNOWN"),
   11470 		     inst.operands[0].reg);
   11471 	}
   11472       else
   11473 	{
   11474 	  if (!inst.operands[0].writeback
   11475 	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
   11476 	    as_warn (_("this instruction will write back the base register"));
   11477 	  else if (inst.operands[0].writeback
   11478 		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
   11479 	    as_warn (_("this instruction will not write back the base register"));
   11480 	}
   11481 
   11482       inst.instruction = THUMB_OP16 (inst.instruction);
   11483       inst.instruction |= inst.operands[0].reg << 8;
   11484       inst.instruction |= inst.operands[1].imm;
   11485     }
   11486 }
   11487 
   11488 static void
   11489 do_t_ldrex (void)
   11490 {
   11491   constraint (!inst.operands[1].isreg || !inst.operands[1].preind
   11492 	      || inst.operands[1].postind || inst.operands[1].writeback
   11493 	      || inst.operands[1].immisreg || inst.operands[1].shifted
   11494 	      || inst.operands[1].negative,
   11495 	      BAD_ADDR_MODE);
   11496 
   11497   constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
   11498 
   11499   inst.instruction |= inst.operands[0].reg << 12;
   11500   inst.instruction |= inst.operands[1].reg << 16;
   11501   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
   11502 }
   11503 
   11504 static void
   11505 do_t_ldrexd (void)
   11506 {
   11507   if (!inst.operands[1].present)
   11508     {
   11509       constraint (inst.operands[0].reg == REG_LR,
   11510 		  _("r14 not allowed as first register "
   11511 		    "when second register is omitted"));
   11512       inst.operands[1].reg = inst.operands[0].reg + 1;
   11513     }
   11514   constraint (inst.operands[0].reg == inst.operands[1].reg,
   11515 	      BAD_OVERLAP);
   11516 
   11517   inst.instruction |= inst.operands[0].reg << 12;
   11518   inst.instruction |= inst.operands[1].reg << 8;
   11519   inst.instruction |= inst.operands[2].reg << 16;
   11520 }
   11521 
   11522 static void
   11523 do_t_ldst (void)
   11524 {
   11525   unsigned long opcode;
   11526   int Rn;
   11527 
   11528   if (inst.operands[0].isreg
   11529       && !inst.operands[0].preind
   11530       && inst.operands[0].reg == REG_PC)
   11531     set_it_insn_type_last ();
   11532 
   11533   opcode = inst.instruction;
   11534   if (unified_syntax)
   11535     {
   11536       if (!inst.operands[1].isreg)
   11537 	{
   11538 	  if (opcode <= 0xffff)
   11539 	    inst.instruction = THUMB_OP32 (opcode);
   11540 	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
   11541 	    return;
   11542 	}
   11543       if (inst.operands[1].isreg
   11544 	  && !inst.operands[1].writeback
   11545 	  && !inst.operands[1].shifted && !inst.operands[1].postind
   11546 	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
   11547 	  && opcode <= 0xffff
   11548 	  && inst.size_req != 4)
   11549 	{
   11550 	  /* Insn may have a 16-bit form.  */
   11551 	  Rn = inst.operands[1].reg;
   11552 	  if (inst.operands[1].immisreg)
   11553 	    {
   11554 	      inst.instruction = THUMB_OP16 (opcode);
    11555 	      /* [Rn, Rm] */
   11556 	      if (Rn <= 7 && inst.operands[1].imm <= 7)
   11557 		goto op16;
   11558 	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
   11559 		reject_bad_reg (inst.operands[1].imm);
   11560 	    }
   11561 	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
   11562 		    && opcode != T_MNEM_ldrsb)
   11563 		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
   11564 		   || (Rn == REG_SP && opcode == T_MNEM_str))
   11565 	    {
   11566 	      /* [Rn, #const] */
   11567 	      if (Rn > 7)
   11568 		{
   11569 		  if (Rn == REG_PC)
   11570 		    {
   11571 		      if (inst.reloc.pc_rel)
   11572 			opcode = T_MNEM_ldr_pc2;
   11573 		      else
   11574 			opcode = T_MNEM_ldr_pc;
   11575 		    }
   11576 		  else
   11577 		    {
   11578 		      if (opcode == T_MNEM_ldr)
   11579 			opcode = T_MNEM_ldr_sp;
   11580 		      else
   11581 			opcode = T_MNEM_str_sp;
   11582 		    }
   11583 		  inst.instruction = inst.operands[0].reg << 8;
   11584 		}
   11585 	      else
   11586 		{
   11587 		  inst.instruction = inst.operands[0].reg;
   11588 		  inst.instruction |= inst.operands[1].reg << 3;
   11589 		}
   11590 	      inst.instruction |= THUMB_OP16 (opcode);
   11591 	      if (inst.size_req == 2)
   11592 		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11593 	      else
   11594 		inst.relax = opcode;
   11595 	      return;
   11596 	    }
   11597 	}
   11598       /* Definitely a 32-bit variant.  */
   11599 
   11600       /* Warning for Erratum 752419.  */
   11601       if (opcode == T_MNEM_ldr
   11602 	  && inst.operands[0].reg == REG_SP
   11603 	  && inst.operands[1].writeback == 1
   11604 	  && !inst.operands[1].immisreg)
   11605 	{
   11606 	  if (no_cpu_selected ()
   11607 	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
   11608 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
   11609 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
   11610 	    as_warn (_("This instruction may be unpredictable "
   11611 		       "if executed on M-profile cores "
   11612 		       "with interrupts enabled."));
   11613 	}
   11614 
   11615       /* Do some validations regarding addressing modes.  */
   11616       if (inst.operands[1].immisreg)
   11617 	reject_bad_reg (inst.operands[1].imm);
   11618 
   11619       constraint (inst.operands[1].writeback == 1
   11620 		  && inst.operands[0].reg == inst.operands[1].reg,
   11621 		  BAD_OVERLAP);
   11622 
   11623       inst.instruction = THUMB_OP32 (opcode);
   11624       inst.instruction |= inst.operands[0].reg << 12;
   11625       encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
   11626       check_ldr_r15_aligned ();
   11627       return;
   11628     }
   11629 
   11630   constraint (inst.operands[0].reg > 7, BAD_HIREG);
   11631 
   11632   if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
   11633     {
   11634       /* Only [Rn,Rm] is acceptable.  */
   11635       constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
   11636       constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
   11637 		  || inst.operands[1].postind || inst.operands[1].shifted
   11638 		  || inst.operands[1].negative,
   11639 		  _("Thumb does not support this addressing mode"));
   11640       inst.instruction = THUMB_OP16 (inst.instruction);
   11641       goto op16;
   11642     }
   11643 
   11644   inst.instruction = THUMB_OP16 (inst.instruction);
   11645   if (!inst.operands[1].isreg)
   11646     if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
   11647       return;
   11648 
   11649   constraint (!inst.operands[1].preind
   11650 	      || inst.operands[1].shifted
   11651 	      || inst.operands[1].writeback,
   11652 	      _("Thumb does not support this addressing mode"));
   11653   if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
   11654     {
   11655       constraint (inst.instruction & 0x0600,
   11656 		  _("byte or halfword not valid for base register"));
   11657       constraint (inst.operands[1].reg == REG_PC
   11658 		  && !(inst.instruction & THUMB_LOAD_BIT),
   11659 		  _("r15 based store not allowed"));
   11660       constraint (inst.operands[1].immisreg,
   11661 		  _("invalid base register for register offset"));
   11662 
   11663       if (inst.operands[1].reg == REG_PC)
   11664 	inst.instruction = T_OPCODE_LDR_PC;
   11665       else if (inst.instruction & THUMB_LOAD_BIT)
   11666 	inst.instruction = T_OPCODE_LDR_SP;
   11667       else
   11668 	inst.instruction = T_OPCODE_STR_SP;
   11669 
   11670       inst.instruction |= inst.operands[0].reg << 8;
   11671       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11672       return;
   11673     }
   11674 
   11675   constraint (inst.operands[1].reg > 7, BAD_HIREG);
   11676   if (!inst.operands[1].immisreg)
   11677     {
   11678       /* Immediate offset.  */
   11679       inst.instruction |= inst.operands[0].reg;
   11680       inst.instruction |= inst.operands[1].reg << 3;
   11681       inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
   11682       return;
   11683     }
   11684 
   11685   /* Register offset.  */
   11686   constraint (inst.operands[1].imm > 7, BAD_HIREG);
   11687   constraint (inst.operands[1].negative,
   11688 	      _("Thumb does not support this addressing mode"));
   11689 
   11690  op16:
   11691   switch (inst.instruction)
   11692     {
   11693     case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
   11694     case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
   11695     case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
   11696     case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
   11697     case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
   11698     case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
   11699     case 0x5600 /* ldrsb */:
   11700     case 0x5e00 /* ldrsh */: break;
   11701     default: abort ();
   11702     }
   11703 
   11704   inst.instruction |= inst.operands[0].reg;
   11705   inst.instruction |= inst.operands[1].reg << 3;
   11706   inst.instruction |= inst.operands[1].imm << 6;
   11707 }
   11708 
   11709 static void
   11710 do_t_ldstd (void)
   11711 {
   11712   if (!inst.operands[1].present)
   11713     {
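      /* A single-register LDRD/STRD uses Rt and Rt+1 as the transfer pair,
	 which is why r12 and r14 are rejected here: the implied second
	 register would be the SP or the PC.  */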
   11714       inst.operands[1].reg = inst.operands[0].reg + 1;
   11715       constraint (inst.operands[0].reg == REG_LR,
   11716 		  _("r14 not allowed here"));
   11717       constraint (inst.operands[0].reg == REG_R12,
   11718 		  _("r12 not allowed here"));
   11719     }
   11720 
   11721   if (inst.operands[2].writeback
   11722       && (inst.operands[0].reg == inst.operands[2].reg
   11723       || inst.operands[1].reg == inst.operands[2].reg))
   11724     as_warn (_("base register written back, and overlaps "
   11725 	       "one of transfer registers"));
   11726 
   11727   inst.instruction |= inst.operands[0].reg << 12;
   11728   inst.instruction |= inst.operands[1].reg << 8;
   11729   encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
   11730 }
   11731 
   11732 static void
   11733 do_t_ldstt (void)
   11734 {
   11735   inst.instruction |= inst.operands[0].reg << 12;
   11736   encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
   11737 }
   11738 
   11739 static void
   11740 do_t_mla (void)
   11741 {
   11742   unsigned Rd, Rn, Rm, Ra;
   11743 
   11744   Rd = inst.operands[0].reg;
   11745   Rn = inst.operands[1].reg;
   11746   Rm = inst.operands[2].reg;
   11747   Ra = inst.operands[3].reg;
   11748 
   11749   reject_bad_reg (Rd);
   11750   reject_bad_reg (Rn);
   11751   reject_bad_reg (Rm);
   11752   reject_bad_reg (Ra);
   11753 
   11754   inst.instruction |= Rd << 8;
   11755   inst.instruction |= Rn << 16;
   11756   inst.instruction |= Rm;
   11757   inst.instruction |= Ra << 12;
   11758 }
   11759 
   11760 static void
   11761 do_t_mlal (void)
   11762 {
   11763   unsigned RdLo, RdHi, Rn, Rm;
   11764 
   11765   RdLo = inst.operands[0].reg;
   11766   RdHi = inst.operands[1].reg;
   11767   Rn = inst.operands[2].reg;
   11768   Rm = inst.operands[3].reg;
   11769 
   11770   reject_bad_reg (RdLo);
   11771   reject_bad_reg (RdHi);
   11772   reject_bad_reg (Rn);
   11773   reject_bad_reg (Rm);
   11774 
   11775   inst.instruction |= RdLo << 12;
   11776   inst.instruction |= RdHi << 8;
   11777   inst.instruction |= Rn << 16;
   11778   inst.instruction |= Rm;
   11779 }
   11780 
   11781 static void
   11782 do_t_mov_cmp (void)
   11783 {
   11784   unsigned Rn, Rm;
   11785 
   11786   Rn = inst.operands[0].reg;
   11787   Rm = inst.operands[1].reg;
   11788 
   11789   if (Rn == REG_PC)
   11790     set_it_insn_type_last ();
   11791 
   11792   if (unified_syntax)
   11793     {
   11794       int r0off = (inst.instruction == T_MNEM_mov
   11795 		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
   11796       unsigned long opcode;
   11797       bfd_boolean narrow;
   11798       bfd_boolean low_regs;
   11799 
   11800       low_regs = (Rn <= 7 && Rm <= 7);
   11801       opcode = inst.instruction;
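      /* Inside an IT block the 16-bit data-processing encodings do not set
	 the flags, so MOVS must take the 32-bit form there; outside an IT
	 block a narrow MOVS is only available for two low registers.  */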
   11802       if (in_it_block ())
   11803 	narrow = opcode != T_MNEM_movs;
   11804       else
   11805 	narrow = opcode != T_MNEM_movs || low_regs;
   11806       if (inst.size_req == 4
   11807 	  || inst.operands[1].shifted)
   11808 	narrow = FALSE;
   11809 
   11810       /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
   11811       if (opcode == T_MNEM_movs && inst.operands[1].isreg
   11812 	  && !inst.operands[1].shifted
   11813 	  && Rn == REG_PC
   11814 	  && Rm == REG_LR)
   11815 	{
   11816 	  inst.instruction = T2_SUBS_PC_LR;
   11817 	  return;
   11818 	}
   11819 
   11820       if (opcode == T_MNEM_cmp)
   11821 	{
   11822 	  constraint (Rn == REG_PC, BAD_PC);
   11823 	  if (narrow)
   11824 	    {
   11825 	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
   11826 		 but valid.  */
   11827 	      warn_deprecated_sp (Rm);
   11828 	      /* R15 was documented as a valid choice for Rm in ARMv6,
   11829 		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
   11830 		 tools reject R15, so we do too.  */
   11831 	      constraint (Rm == REG_PC, BAD_PC);
   11832 	    }
   11833 	  else
   11834 	    reject_bad_reg (Rm);
   11835 	}
   11836       else if (opcode == T_MNEM_mov
   11837 	       || opcode == T_MNEM_movs)
   11838 	{
   11839 	  if (inst.operands[1].isreg)
   11840 	    {
   11841 	      if (opcode == T_MNEM_movs)
   11842 		{
   11843 		  reject_bad_reg (Rn);
   11844 		  reject_bad_reg (Rm);
   11845 		}
   11846 	      else if (narrow)
   11847 		{
   11848 		  /* This is mov.n.  */
   11849 		  if ((Rn == REG_SP || Rn == REG_PC)
   11850 		      && (Rm == REG_SP || Rm == REG_PC))
   11851 		    {
   11852 		      as_tsktsk (_("Use of r%u as a source register is "
   11853 				 "deprecated when r%u is the destination "
   11854 				 "register."), Rm, Rn);
   11855 		    }
   11856 		}
   11857 	      else
   11858 		{
   11859 		  /* This is mov.w.  */
   11860 		  constraint (Rn == REG_PC, BAD_PC);
   11861 		  constraint (Rm == REG_PC, BAD_PC);
   11862 		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
   11863 		}
   11864 	    }
   11865 	  else
   11866 	    reject_bad_reg (Rn);
   11867 	}
   11868 
   11869       if (!inst.operands[1].isreg)
   11870 	{
   11871 	  /* Immediate operand.  */
   11872 	  if (!in_it_block () && opcode == T_MNEM_mov)
   11873 	    narrow = 0;
   11874 	  if (low_regs && narrow)
   11875 	    {
   11876 	      inst.instruction = THUMB_OP16 (opcode);
   11877 	      inst.instruction |= Rn << 8;
   11878 	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   11879 		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
   11880 		{
   11881 		  if (inst.size_req == 2)
   11882 		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
   11883 		  else
   11884 		    inst.relax = opcode;
   11885 		}
   11886 	    }
   11887 	  else
   11888 	    {
   11889 	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
   11890 			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
   11891 			  THUMB1_RELOC_ONLY);
   11892 
   11893 	      inst.instruction = THUMB_OP32 (inst.instruction);
   11894 	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   11895 	      inst.instruction |= Rn << r0off;
   11896 	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   11897 	    }
   11898 	}
   11899       else if (inst.operands[1].shifted && inst.operands[1].immisreg
   11900 	       && (inst.instruction == T_MNEM_mov
   11901 		   || inst.instruction == T_MNEM_movs))
   11902 	{
   11903 	  /* Register shifts are encoded as separate shift instructions.  */
   11904 	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
   11905 
   11906 	  if (in_it_block ())
   11907 	    narrow = !flags;
   11908 	  else
   11909 	    narrow = flags;
   11910 
   11911 	  if (inst.size_req == 4)
   11912 	    narrow = FALSE;
   11913 
   11914 	  if (!low_regs || inst.operands[1].imm > 7)
   11915 	    narrow = FALSE;
   11916 
   11917 	  if (Rn != Rm)
   11918 	    narrow = FALSE;
   11919 
   11920 	  switch (inst.operands[1].shift_kind)
   11921 	    {
   11922 	    case SHIFT_LSL:
   11923 	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
   11924 	      break;
   11925 	    case SHIFT_ASR:
   11926 	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
   11927 	      break;
   11928 	    case SHIFT_LSR:
   11929 	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
   11930 	      break;
   11931 	    case SHIFT_ROR:
   11932 	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
   11933 	      break;
   11934 	    default:
   11935 	      abort ();
   11936 	    }
   11937 
   11938 	  inst.instruction = opcode;
   11939 	  if (narrow)
   11940 	    {
   11941 	      inst.instruction |= Rn;
   11942 	      inst.instruction |= inst.operands[1].imm << 3;
   11943 	    }
   11944 	  else
   11945 	    {
   11946 	      if (flags)
   11947 		inst.instruction |= CONDS_BIT;
   11948 
   11949 	      inst.instruction |= Rn << 8;
   11950 	      inst.instruction |= Rm << 16;
   11951 	      inst.instruction |= inst.operands[1].imm;
   11952 	    }
   11953 	}
   11954       else if (!narrow)
   11955 	{
   11956 	  /* Some mov with immediate shift have narrow variants.
   11957 	     Register shifts are handled above.  */
   11958 	  if (low_regs && inst.operands[1].shifted
   11959 	      && (inst.instruction == T_MNEM_mov
   11960 		  || inst.instruction == T_MNEM_movs))
   11961 	    {
   11962 	      if (in_it_block ())
   11963 		narrow = (inst.instruction == T_MNEM_mov);
   11964 	      else
   11965 		narrow = (inst.instruction == T_MNEM_movs);
   11966 	    }
   11967 
   11968 	  if (narrow)
   11969 	    {
   11970 	      switch (inst.operands[1].shift_kind)
   11971 		{
   11972 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
   11973 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
   11974 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
   11975 		default: narrow = FALSE; break;
   11976 		}
   11977 	    }
   11978 
   11979 	  if (narrow)
   11980 	    {
   11981 	      inst.instruction |= Rn;
   11982 	      inst.instruction |= Rm << 3;
   11983 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   11984 	    }
   11985 	  else
   11986 	    {
   11987 	      inst.instruction = THUMB_OP32 (inst.instruction);
   11988 	      inst.instruction |= Rn << r0off;
   11989 	      encode_thumb32_shifted_operand (1);
   11990 	    }
   11991 	}
   11992       else
   11993 	switch (inst.instruction)
   11994 	  {
   11995 	  case T_MNEM_mov:
    11996 	    /* In v4t or v5t a move of two low registers produces unpredictable
    11997 	       results.  Don't allow this.  */
   11998 	    if (low_regs)
   11999 	      {
    12000 /* Silence this error for now because clang generates "MOV" with two low
    12001    registers in unified syntax for thumb1 and expects the CPSR flags not to be
    12002    affected.  This check doesn't exist in binutils-2.21 with gcc 4.6.  The thumb1
    12003    code generated by clang will continue to have problems running on v5t but not
    12004    on v6 and beyond.  */
   12005 #if 0
   12006 		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
   12007 			    "MOV Rd, Rs with two low registers is not "
   12008 			    "permitted on this architecture");
   12009 #endif
   12010 		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
   12011 					arm_ext_v6);
   12012 	      }
   12013 
   12014 	    inst.instruction = T_OPCODE_MOV_HR;
   12015 	    inst.instruction |= (Rn & 0x8) << 4;
   12016 	    inst.instruction |= (Rn & 0x7);
   12017 	    inst.instruction |= Rm << 3;
   12018 	    break;
   12019 
   12020 	  case T_MNEM_movs:
   12021 	    /* We know we have low registers at this point.
   12022 	       Generate LSLS Rd, Rs, #0.  */
   12023 	    inst.instruction = T_OPCODE_LSL_I;
   12024 	    inst.instruction |= Rn;
   12025 	    inst.instruction |= Rm << 3;
   12026 	    break;
   12027 
   12028 	  case T_MNEM_cmp:
   12029 	    if (low_regs)
   12030 	      {
   12031 		inst.instruction = T_OPCODE_CMP_LR;
   12032 		inst.instruction |= Rn;
   12033 		inst.instruction |= Rm << 3;
   12034 	      }
   12035 	    else
   12036 	      {
   12037 		inst.instruction = T_OPCODE_CMP_HR;
   12038 		inst.instruction |= (Rn & 0x8) << 4;
   12039 		inst.instruction |= (Rn & 0x7);
   12040 		inst.instruction |= Rm << 3;
   12041 	      }
   12042 	    break;
   12043 	  }
   12044       return;
   12045     }
   12046 
   12047   inst.instruction = THUMB_OP16 (inst.instruction);
   12048 
   12049   /* PR 10443: Do not silently ignore shifted operands.  */
   12050   constraint (inst.operands[1].shifted,
   12051 	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
   12052 
   12053   if (inst.operands[1].isreg)
   12054     {
   12055       if (Rn < 8 && Rm < 8)
   12056 	{
    12057 	  /* A move of two low registers is encoded as ADD Rd, Rs, #0
    12058 	     since a MOV instruction produces unpredictable results.  */
   12059 	  if (inst.instruction == T_OPCODE_MOV_I8)
   12060 	    inst.instruction = T_OPCODE_ADD_I3;
   12061 	  else
   12062 	    inst.instruction = T_OPCODE_CMP_LR;
   12063 
   12064 	  inst.instruction |= Rn;
   12065 	  inst.instruction |= Rm << 3;
   12066 	}
   12067       else
   12068 	{
   12069 	  if (inst.instruction == T_OPCODE_MOV_I8)
   12070 	    inst.instruction = T_OPCODE_MOV_HR;
   12071 	  else
   12072 	    inst.instruction = T_OPCODE_CMP_HR;
   12073 	  do_t_cpy ();
   12074 	}
   12075     }
   12076   else
   12077     {
   12078       constraint (Rn > 7,
   12079 		  _("only lo regs allowed with immediate"));
   12080       inst.instruction |= Rn << 8;
   12081       inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
   12082     }
   12083 }
   12084 
   12085 static void
   12086 do_t_mov16 (void)
   12087 {
   12088   unsigned Rd;
   12089   bfd_vma imm;
   12090   bfd_boolean top;
   12091 
   12092   top = (inst.instruction & 0x00800000) != 0;
   12093   if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
   12094     {
    12095       constraint (top, _(":lower16: not allowed in this instruction"));
   12096       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
   12097     }
   12098   else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
   12099     {
    12100       constraint (!top, _(":upper16: not allowed in this instruction"));
   12101       inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
   12102     }
   12103 
   12104   Rd = inst.operands[0].reg;
   12105   reject_bad_reg (Rd);
   12106 
   12107   inst.instruction |= Rd << 8;
   12108   if (inst.reloc.type == BFD_RELOC_UNUSED)
   12109     {
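      /* The 16-bit immediate is scattered across the Thumb-2 encoding as
	 imm4:i:imm3:imm8 (instruction bits 19:16, 26, 14:12 and 7:0).  For
	 example, "movw r0, #0xabcd" gives imm4=0xa, i=1, imm3=0x3 and
	 imm8=0xcd.  */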
   12110       imm = inst.reloc.exp.X_add_number;
   12111       inst.instruction |= (imm & 0xf000) << 4;
   12112       inst.instruction |= (imm & 0x0800) << 15;
   12113       inst.instruction |= (imm & 0x0700) << 4;
   12114       inst.instruction |= (imm & 0x00ff);
   12115     }
   12116 }
   12117 
   12118 static void
   12119 do_t_mvn_tst (void)
   12120 {
   12121   unsigned Rn, Rm;
   12122 
   12123   Rn = inst.operands[0].reg;
   12124   Rm = inst.operands[1].reg;
   12125 
   12126   if (inst.instruction == T_MNEM_cmp
   12127       || inst.instruction == T_MNEM_cmn)
   12128     constraint (Rn == REG_PC, BAD_PC);
   12129   else
   12130     reject_bad_reg (Rn);
   12131   reject_bad_reg (Rm);
   12132 
   12133   if (unified_syntax)
   12134     {
   12135       int r0off = (inst.instruction == T_MNEM_mvn
   12136 		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
   12137       bfd_boolean narrow;
   12138 
   12139       if (inst.size_req == 4
   12140 	  || inst.instruction > 0xffff
   12141 	  || inst.operands[1].shifted
   12142 	  || Rn > 7 || Rm > 7)
   12143 	narrow = FALSE;
   12144       else if (inst.instruction == T_MNEM_cmn
   12145 	       || inst.instruction == T_MNEM_tst)
   12146 	narrow = TRUE;
   12147       else if (THUMB_SETS_FLAGS (inst.instruction))
   12148 	narrow = !in_it_block ();
   12149       else
   12150 	narrow = in_it_block ();
   12151 
   12152       if (!inst.operands[1].isreg)
   12153 	{
   12154 	  /* For an immediate, we always generate a 32-bit opcode;
   12155 	     section relaxation will shrink it later if possible.  */
   12156 	  if (inst.instruction < 0xffff)
   12157 	    inst.instruction = THUMB_OP32 (inst.instruction);
   12158 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12159 	  inst.instruction |= Rn << r0off;
   12160 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12161 	}
   12162       else
   12163 	{
   12164 	  /* See if we can do this with a 16-bit instruction.  */
   12165 	  if (narrow)
   12166 	    {
   12167 	      inst.instruction = THUMB_OP16 (inst.instruction);
   12168 	      inst.instruction |= Rn;
   12169 	      inst.instruction |= Rm << 3;
   12170 	    }
   12171 	  else
   12172 	    {
   12173 	      constraint (inst.operands[1].shifted
   12174 			  && inst.operands[1].immisreg,
   12175 			  _("shift must be constant"));
   12176 	      if (inst.instruction < 0xffff)
   12177 		inst.instruction = THUMB_OP32 (inst.instruction);
   12178 	      inst.instruction |= Rn << r0off;
   12179 	      encode_thumb32_shifted_operand (1);
   12180 	    }
   12181 	}
   12182     }
   12183   else
   12184     {
   12185       constraint (inst.instruction > 0xffff
   12186 		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
   12187       constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
   12188 		  _("unshifted register required"));
   12189       constraint (Rn > 7 || Rm > 7,
   12190 		  BAD_HIREG);
   12191 
   12192       inst.instruction = THUMB_OP16 (inst.instruction);
   12193       inst.instruction |= Rn;
   12194       inst.instruction |= Rm << 3;
   12195     }
   12196 }
   12197 
   12198 static void
   12199 do_t_mrs (void)
   12200 {
   12201   unsigned Rd;
   12202 
   12203   if (do_vfp_nsyn_mrs () == SUCCESS)
   12204     return;
   12205 
   12206   Rd = inst.operands[0].reg;
   12207   reject_bad_reg (Rd);
   12208   inst.instruction |= Rd << 8;
   12209 
   12210   if (inst.operands[1].isreg)
   12211     {
   12212       unsigned br = inst.operands[1].reg;
   12213       if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
   12214 	as_bad (_("bad register for mrs"));
   12215 
   12216       inst.instruction |= br & (0xf << 16);
   12217       inst.instruction |= (br & 0x300) >> 4;
   12218       inst.instruction |= (br & SPSR_BIT) >> 2;
   12219     }
   12220   else
   12221     {
   12222       int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
   12223 
   12224       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
   12225 	{
   12226 	  /* PR gas/12698:  The constraint is only applied for m_profile.
   12227 	     If the user has specified -march=all, we want to ignore it as
   12228 	     we are building for any CPU type, including non-m variants.  */
   12229 	  bfd_boolean m_profile =
   12230 	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
   12231 	  constraint ((flags != 0) && m_profile, _("selected processor does "
   12232 						   "not support requested special purpose register"));
   12233 	}
   12234       else
   12235 	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
   12236 	   devices).  */
   12237 	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
   12238 		    _("'APSR', 'CPSR' or 'SPSR' expected"));
   12239 
   12240       inst.instruction |= (flags & SPSR_BIT) >> 2;
   12241       inst.instruction |= inst.operands[1].imm & 0xff;
   12242       inst.instruction |= 0xf0000;
   12243     }
   12244 }
   12245 
   12246 static void
   12247 do_t_msr (void)
   12248 {
   12249   int flags;
   12250   unsigned Rn;
   12251 
   12252   if (do_vfp_nsyn_msr () == SUCCESS)
   12253     return;
   12254 
   12255   constraint (!inst.operands[1].isreg,
   12256 	      _("Thumb encoding does not support an immediate here"));
   12257 
   12258   if (inst.operands[0].isreg)
   12259     flags = (int)(inst.operands[0].reg);
   12260   else
   12261     flags = inst.operands[0].imm;
   12262 
   12263   if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
   12264     {
   12265       int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
   12266 
   12267       /* PR gas/12698:  The constraint is only applied for m_profile.
   12268 	 If the user has specified -march=all, we want to ignore it as
   12269 	 we are building for any CPU type, including non-m variants.  */
   12270       bfd_boolean m_profile =
   12271 	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
   12272       constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
   12273 	   && (bits & ~(PSR_s | PSR_f)) != 0)
   12274 	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
   12275 	      && bits != PSR_f)) && m_profile,
   12276 	  _("selected processor does not support requested special "
   12277 	    "purpose register"));
   12278     }
   12279   else
   12280      constraint ((flags & 0xff) != 0, _("selected processor does not support "
   12281 		 "requested special purpose register"));
   12282 
   12283   Rn = inst.operands[1].reg;
   12284   reject_bad_reg (Rn);
   12285 
   12286   inst.instruction |= (flags & SPSR_BIT) >> 2;
   12287   inst.instruction |= (flags & 0xf0000) >> 8;
   12288   inst.instruction |= (flags & 0x300) >> 4;
   12289   inst.instruction |= (flags & 0xff);
   12290   inst.instruction |= Rn << 16;
   12291 }
   12292 
   12293 static void
   12294 do_t_mul (void)
   12295 {
   12296   bfd_boolean narrow;
   12297   unsigned Rd, Rn, Rm;
   12298 
   12299   if (!inst.operands[2].present)
   12300     inst.operands[2].reg = inst.operands[0].reg;
   12301 
   12302   Rd = inst.operands[0].reg;
   12303   Rn = inst.operands[1].reg;
   12304   Rm = inst.operands[2].reg;
   12305 
   12306   if (unified_syntax)
   12307     {
   12308       if (inst.size_req == 4
   12309 	  || (Rd != Rn
   12310 	      && Rd != Rm)
   12311 	  || Rn > 7
   12312 	  || Rm > 7)
   12313 	narrow = FALSE;
   12314       else if (inst.instruction == T_MNEM_muls)
   12315 	narrow = !in_it_block ();
   12316       else
   12317 	narrow = in_it_block ();
   12318     }
   12319   else
   12320     {
   12321       constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
   12322       constraint (Rn > 7 || Rm > 7,
   12323 		  BAD_HIREG);
   12324       narrow = TRUE;
   12325     }
   12326 
   12327   if (narrow)
   12328     {
   12329       /* 16-bit MULS/Conditional MUL.  */
   12330       inst.instruction = THUMB_OP16 (inst.instruction);
   12331       inst.instruction |= Rd;
   12332 
   12333       if (Rd == Rn)
   12334 	inst.instruction |= Rm << 3;
   12335       else if (Rd == Rm)
   12336 	inst.instruction |= Rn << 3;
   12337       else
   12338 	constraint (1, _("dest must overlap one source register"));
   12339     }
   12340   else
   12341     {
   12342       constraint (inst.instruction != T_MNEM_mul,
   12343 		  _("Thumb-2 MUL must not set flags"));
   12344       /* 32-bit MUL.  */
   12345       inst.instruction = THUMB_OP32 (inst.instruction);
   12346       inst.instruction |= Rd << 8;
   12347       inst.instruction |= Rn << 16;
   12348       inst.instruction |= Rm << 0;
   12349 
   12350       reject_bad_reg (Rd);
   12351       reject_bad_reg (Rn);
   12352       reject_bad_reg (Rm);
   12353     }
   12354 }
   12355 
   12356 static void
   12357 do_t_mull (void)
   12358 {
   12359   unsigned RdLo, RdHi, Rn, Rm;
   12360 
   12361   RdLo = inst.operands[0].reg;
   12362   RdHi = inst.operands[1].reg;
   12363   Rn = inst.operands[2].reg;
   12364   Rm = inst.operands[3].reg;
   12365 
   12366   reject_bad_reg (RdLo);
   12367   reject_bad_reg (RdHi);
   12368   reject_bad_reg (Rn);
   12369   reject_bad_reg (Rm);
   12370 
   12371   inst.instruction |= RdLo << 12;
   12372   inst.instruction |= RdHi << 8;
   12373   inst.instruction |= Rn << 16;
   12374   inst.instruction |= Rm;
   12375 
    12376   if (RdLo == RdHi)
   12377     as_tsktsk (_("rdhi and rdlo must be different"));
   12378 }
   12379 
   12380 static void
   12381 do_t_nop (void)
   12382 {
   12383   set_it_insn_type (NEUTRAL_IT_INSN);
   12384 
   12385   if (unified_syntax)
   12386     {
   12387       if (inst.size_req == 4 || inst.operands[0].imm > 15)
   12388 	{
   12389 	  inst.instruction = THUMB_OP32 (inst.instruction);
   12390 	  inst.instruction |= inst.operands[0].imm;
   12391 	}
   12392       else
   12393 	{
   12394 	  /* PR9722: Check for Thumb2 availability before
   12395 	     generating a thumb2 nop instruction.  */
   12396 	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
   12397 	    {
   12398 	      inst.instruction = THUMB_OP16 (inst.instruction);
   12399 	      inst.instruction |= inst.operands[0].imm << 4;
   12400 	    }
   12401 	  else
   12402 	    inst.instruction = 0x46c0;
   12403 	}
   12404     }
   12405   else
   12406     {
   12407       constraint (inst.operands[0].present,
   12408 		  _("Thumb does not support NOP with hints"));
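      /* 0x46c0 is the traditional Thumb-1 NOP encoding: "mov r8, r8".  */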
   12409       inst.instruction = 0x46c0;
   12410     }
   12411 }
   12412 
   12413 static void
   12414 do_t_neg (void)
   12415 {
   12416   if (unified_syntax)
   12417     {
   12418       bfd_boolean narrow;
   12419 
   12420       if (THUMB_SETS_FLAGS (inst.instruction))
   12421 	narrow = !in_it_block ();
   12422       else
   12423 	narrow = in_it_block ();
   12424       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
   12425 	narrow = FALSE;
   12426       if (inst.size_req == 4)
   12427 	narrow = FALSE;
   12428 
   12429       if (!narrow)
   12430 	{
   12431 	  inst.instruction = THUMB_OP32 (inst.instruction);
   12432 	  inst.instruction |= inst.operands[0].reg << 8;
   12433 	  inst.instruction |= inst.operands[1].reg << 16;
   12434 	}
   12435       else
   12436 	{
   12437 	  inst.instruction = THUMB_OP16 (inst.instruction);
   12438 	  inst.instruction |= inst.operands[0].reg;
   12439 	  inst.instruction |= inst.operands[1].reg << 3;
   12440 	}
   12441     }
   12442   else
   12443     {
   12444       constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
   12445 		  BAD_HIREG);
   12446       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   12447 
   12448       inst.instruction = THUMB_OP16 (inst.instruction);
   12449       inst.instruction |= inst.operands[0].reg;
   12450       inst.instruction |= inst.operands[1].reg << 3;
   12451     }
   12452 }
   12453 
   12454 static void
   12455 do_t_orn (void)
   12456 {
   12457   unsigned Rd, Rn;
   12458 
   12459   Rd = inst.operands[0].reg;
   12460   Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
   12461 
   12462   reject_bad_reg (Rd);
   12463   /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
   12464   reject_bad_reg (Rn);
   12465 
   12466   inst.instruction |= Rd << 8;
   12467   inst.instruction |= Rn << 16;
   12468 
   12469   if (!inst.operands[2].isreg)
   12470     {
   12471       inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12472       inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12473     }
   12474   else
   12475     {
   12476       unsigned Rm;
   12477 
   12478       Rm = inst.operands[2].reg;
   12479       reject_bad_reg (Rm);
   12480 
   12481       constraint (inst.operands[2].shifted
   12482 		  && inst.operands[2].immisreg,
   12483 		  _("shift must be constant"));
   12484       encode_thumb32_shifted_operand (2);
   12485     }
   12486 }
   12487 
   12488 static void
   12489 do_t_pkhbt (void)
   12490 {
   12491   unsigned Rd, Rn, Rm;
   12492 
   12493   Rd = inst.operands[0].reg;
   12494   Rn = inst.operands[1].reg;
   12495   Rm = inst.operands[2].reg;
   12496 
   12497   reject_bad_reg (Rd);
   12498   reject_bad_reg (Rn);
   12499   reject_bad_reg (Rm);
   12500 
   12501   inst.instruction |= Rd << 8;
   12502   inst.instruction |= Rn << 16;
   12503   inst.instruction |= Rm;
   12504   if (inst.operands[3].present)
   12505     {
   12506       unsigned int val = inst.reloc.exp.X_add_number;
   12507       constraint (inst.reloc.exp.X_op != O_constant,
   12508 		  _("expression too complex"));
   12509       inst.instruction |= (val & 0x1c) << 10;
   12510       inst.instruction |= (val & 0x03) << 6;
   12511     }
   12512 }
   12513 
   12514 static void
   12515 do_t_pkhtb (void)
   12516 {
   12517   if (!inst.operands[3].present)
   12518     {
   12519       unsigned Rtmp;
   12520 
   12521       inst.instruction &= ~0x00000020;
   12522 
   12523       /* PR 10168.  Swap the Rm and Rn registers.  */
   12524       Rtmp = inst.operands[1].reg;
   12525       inst.operands[1].reg = inst.operands[2].reg;
   12526       inst.operands[2].reg = Rtmp;
   12527     }
   12528   do_t_pkhbt ();
   12529 }
   12530 
   12531 static void
   12532 do_t_pld (void)
   12533 {
   12534   if (inst.operands[0].immisreg)
   12535     reject_bad_reg (inst.operands[0].imm);
   12536 
   12537   encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
   12538 }
   12539 
   12540 static void
   12541 do_t_push_pop (void)
   12542 {
   12543   unsigned mask;
   12544 
   12545   constraint (inst.operands[0].writeback,
   12546 	      _("push/pop do not support {reglist}^"));
   12547   constraint (inst.reloc.type != BFD_RELOC_UNUSED,
   12548 	      _("expression too complex"));
   12549 
   12550   mask = inst.operands[0].imm;
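  /* The 16-bit PUSH/POP can encode r0-r7 directly, plus LR (for PUSH) or
     PC (for POP) via the extra THUMB_PP_PC_LR bit; any other register in
     the list forces the 32-bit LDM/STM form with SP as the base.  */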
   12551   if (inst.size_req != 4 && (mask & ~0xff) == 0)
   12552     inst.instruction = THUMB_OP16 (inst.instruction) | mask;
   12553   else if (inst.size_req != 4
   12554 	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
   12555 				       ? REG_LR : REG_PC)))
   12556     {
   12557       inst.instruction = THUMB_OP16 (inst.instruction);
   12558       inst.instruction |= THUMB_PP_PC_LR;
   12559       inst.instruction |= mask & 0xff;
   12560     }
   12561   else if (unified_syntax)
   12562     {
   12563       inst.instruction = THUMB_OP32 (inst.instruction);
   12564       encode_thumb2_ldmstm (13, mask, TRUE);
   12565     }
   12566   else
   12567     {
   12568       inst.error = _("invalid register list to push/pop instruction");
   12569       return;
   12570     }
   12571 }
   12572 
   12573 static void
   12574 do_t_rbit (void)
   12575 {
   12576   unsigned Rd, Rm;
   12577 
   12578   Rd = inst.operands[0].reg;
   12579   Rm = inst.operands[1].reg;
   12580 
   12581   reject_bad_reg (Rd);
   12582   reject_bad_reg (Rm);
   12583 
   12584   inst.instruction |= Rd << 8;
   12585   inst.instruction |= Rm << 16;
   12586   inst.instruction |= Rm;
   12587 }
   12588 
   12589 static void
   12590 do_t_rev (void)
   12591 {
   12592   unsigned Rd, Rm;
   12593 
   12594   Rd = inst.operands[0].reg;
   12595   Rm = inst.operands[1].reg;
   12596 
   12597   reject_bad_reg (Rd);
   12598   reject_bad_reg (Rm);
   12599 
   12600   if (Rd <= 7 && Rm <= 7
   12601       && inst.size_req != 4)
   12602     {
   12603       inst.instruction = THUMB_OP16 (inst.instruction);
   12604       inst.instruction |= Rd;
   12605       inst.instruction |= Rm << 3;
   12606     }
   12607   else if (unified_syntax)
   12608     {
   12609       inst.instruction = THUMB_OP32 (inst.instruction);
   12610       inst.instruction |= Rd << 8;
   12611       inst.instruction |= Rm << 16;
   12612       inst.instruction |= Rm;
   12613     }
   12614   else
   12615     inst.error = BAD_HIREG;
   12616 }
   12617 
   12618 static void
   12619 do_t_rrx (void)
   12620 {
   12621   unsigned Rd, Rm;
   12622 
   12623   Rd = inst.operands[0].reg;
   12624   Rm = inst.operands[1].reg;
   12625 
   12626   reject_bad_reg (Rd);
   12627   reject_bad_reg (Rm);
   12628 
   12629   inst.instruction |= Rd << 8;
   12630   inst.instruction |= Rm;
   12631 }
   12632 
   12633 static void
   12634 do_t_rsb (void)
   12635 {
   12636   unsigned Rd, Rs;
   12637 
   12638   Rd = inst.operands[0].reg;
   12639   Rs = (inst.operands[1].present
   12640 	? inst.operands[1].reg    /* Rd, Rs, foo */
   12641 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
   12642 
   12643   reject_bad_reg (Rd);
   12644   reject_bad_reg (Rs);
   12645   if (inst.operands[2].isreg)
   12646     reject_bad_reg (inst.operands[2].reg);
   12647 
   12648   inst.instruction |= Rd << 8;
   12649   inst.instruction |= Rs << 16;
   12650   if (!inst.operands[2].isreg)
   12651     {
   12652       bfd_boolean narrow;
   12653 
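      /* Bit 20 of the 32-bit encoding is the S bit, so this distinguishes
	 RSBS from RSB: the narrow NEGS is only usable when its flag-setting
	 behaviour matches (RSBS outside an IT block, RSB inside one).  */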
   12654       if ((inst.instruction & 0x00100000) != 0)
   12655 	narrow = !in_it_block ();
   12656       else
   12657 	narrow = in_it_block ();
   12658 
   12659       if (Rd > 7 || Rs > 7)
   12660 	narrow = FALSE;
   12661 
   12662       if (inst.size_req == 4 || !unified_syntax)
   12663 	narrow = FALSE;
   12664 
   12665       if (inst.reloc.exp.X_op != O_constant
   12666 	  || inst.reloc.exp.X_add_number != 0)
   12667 	narrow = FALSE;
   12668 
   12669       /* Turn rsb #0 into 16-bit neg.  We should probably do this via
   12670 	 relaxation, but it doesn't seem worth the hassle.  */
   12671       if (narrow)
   12672 	{
   12673 	  inst.reloc.type = BFD_RELOC_UNUSED;
   12674 	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
   12675 	  inst.instruction |= Rs << 3;
   12676 	  inst.instruction |= Rd;
   12677 	}
   12678       else
   12679 	{
   12680 	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
   12681 	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
   12682 	}
   12683     }
   12684   else
   12685     encode_thumb32_shifted_operand (2);
   12686 }
   12687 
   12688 static void
   12689 do_t_setend (void)
   12690 {
   12691   if (warn_on_deprecated
   12692       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
   12693       as_tsktsk (_("setend use is deprecated for ARMv8"));
   12694 
   12695   set_it_insn_type (OUTSIDE_IT_INSN);
   12696   if (inst.operands[0].imm)
   12697     inst.instruction |= 0x8;
   12698 }
   12699 
   12700 static void
   12701 do_t_shift (void)
   12702 {
   12703   if (!inst.operands[1].present)
   12704     inst.operands[1].reg = inst.operands[0].reg;
   12705 
   12706   if (unified_syntax)
   12707     {
   12708       bfd_boolean narrow;
   12709       int shift_kind;
   12710 
   12711       switch (inst.instruction)
   12712 	{
   12713 	case T_MNEM_asr:
   12714 	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
   12715 	case T_MNEM_lsl:
   12716 	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
   12717 	case T_MNEM_lsr:
   12718 	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
   12719 	case T_MNEM_ror:
   12720 	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
   12721 	default: abort ();
   12722 	}
   12723 
   12724       if (THUMB_SETS_FLAGS (inst.instruction))
   12725 	narrow = !in_it_block ();
   12726       else
   12727 	narrow = in_it_block ();
   12728       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
   12729 	narrow = FALSE;
   12730       if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
   12731 	narrow = FALSE;
   12732       if (inst.operands[2].isreg
   12733 	  && (inst.operands[1].reg != inst.operands[0].reg
   12734 	      || inst.operands[2].reg > 7))
   12735 	narrow = FALSE;
   12736       if (inst.size_req == 4)
   12737 	narrow = FALSE;
   12738 
   12739       reject_bad_reg (inst.operands[0].reg);
   12740       reject_bad_reg (inst.operands[1].reg);
   12741 
   12742       if (!narrow)
   12743 	{
   12744 	  if (inst.operands[2].isreg)
   12745 	    {
   12746 	      reject_bad_reg (inst.operands[2].reg);
   12747 	      inst.instruction = THUMB_OP32 (inst.instruction);
   12748 	      inst.instruction |= inst.operands[0].reg << 8;
   12749 	      inst.instruction |= inst.operands[1].reg << 16;
   12750 	      inst.instruction |= inst.operands[2].reg;
   12751 
   12752 	      /* PR 12854: Error on extraneous shifts.  */
   12753 	      constraint (inst.operands[2].shifted,
   12754 			  _("extraneous shift as part of operand to shift insn"));
   12755 	    }
   12756 	  else
   12757 	    {
   12758 	      inst.operands[1].shifted = 1;
   12759 	      inst.operands[1].shift_kind = shift_kind;
   12760 	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
   12761 					     ? T_MNEM_movs : T_MNEM_mov);
   12762 	      inst.instruction |= inst.operands[0].reg << 8;
   12763 	      encode_thumb32_shifted_operand (1);
   12764 	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
   12765 	      inst.reloc.type = BFD_RELOC_UNUSED;
   12766 	    }
   12767 	}
   12768       else
   12769 	{
   12770 	  if (inst.operands[2].isreg)
   12771 	    {
   12772 	      switch (shift_kind)
   12773 		{
   12774 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
   12775 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
   12776 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
   12777 		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
   12778 		default: abort ();
   12779 		}
   12780 
   12781 	      inst.instruction |= inst.operands[0].reg;
   12782 	      inst.instruction |= inst.operands[2].reg << 3;
   12783 
   12784 	      /* PR 12854: Error on extraneous shifts.  */
   12785 	      constraint (inst.operands[2].shifted,
   12786 			  _("extraneous shift as part of operand to shift insn"));
   12787 	    }
   12788 	  else
   12789 	    {
   12790 	      switch (shift_kind)
   12791 		{
   12792 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
   12793 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
   12794 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
   12795 		default: abort ();
   12796 		}
   12797 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   12798 	      inst.instruction |= inst.operands[0].reg;
   12799 	      inst.instruction |= inst.operands[1].reg << 3;
   12800 	    }
   12801 	}
   12802     }
   12803   else
   12804     {
   12805       constraint (inst.operands[0].reg > 7
   12806 		  || inst.operands[1].reg > 7, BAD_HIREG);
   12807       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
   12808 
   12809       if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
   12810 	{
   12811 	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
   12812 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
   12813 		      _("source1 and dest must be same register"));
   12814 
   12815 	  switch (inst.instruction)
   12816 	    {
   12817 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
   12818 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
   12819 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
   12820 	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
   12821 	    default: abort ();
   12822 	    }
   12823 
   12824 	  inst.instruction |= inst.operands[0].reg;
   12825 	  inst.instruction |= inst.operands[2].reg << 3;
   12826 
   12827 	  /* PR 12854: Error on extraneous shifts.  */
   12828 	  constraint (inst.operands[2].shifted,
   12829 		      _("extraneous shift as part of operand to shift insn"));
   12830 	}
   12831       else
   12832 	{
   12833 	  switch (inst.instruction)
   12834 	    {
   12835 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
   12836 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
   12837 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
   12838 	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
   12839 	    default: abort ();
   12840 	    }
   12841 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
   12842 	  inst.instruction |= inst.operands[0].reg;
   12843 	  inst.instruction |= inst.operands[1].reg << 3;
   12844 	}
   12845     }
   12846 }
   12847 
   12848 static void
   12849 do_t_simd (void)
   12850 {
   12851   unsigned Rd, Rn, Rm;
   12852 
   12853   Rd = inst.operands[0].reg;
   12854   Rn = inst.operands[1].reg;
   12855   Rm = inst.operands[2].reg;
   12856 
   12857   reject_bad_reg (Rd);
   12858   reject_bad_reg (Rn);
   12859   reject_bad_reg (Rm);
   12860 
   12861   inst.instruction |= Rd << 8;
   12862   inst.instruction |= Rn << 16;
   12863   inst.instruction |= Rm;
   12864 }
   12865 
   12866 static void
   12867 do_t_simd2 (void)
   12868 {
   12869   unsigned Rd, Rn, Rm;
   12870 
   12871   Rd = inst.operands[0].reg;
   12872   Rm = inst.operands[1].reg;
   12873   Rn = inst.operands[2].reg;
   12874 
   12875   reject_bad_reg (Rd);
   12876   reject_bad_reg (Rn);
   12877   reject_bad_reg (Rm);
   12878 
   12879   inst.instruction |= Rd << 8;
   12880   inst.instruction |= Rn << 16;
   12881   inst.instruction |= Rm;
   12882 }
   12883 
   12884 static void
   12885 do_t_smc (void)
   12886 {
   12887   unsigned int value = inst.reloc.exp.X_add_number;
   12888   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
   12889 	      _("SMC is not permitted on this architecture"));
   12890   constraint (inst.reloc.exp.X_op != O_constant,
   12891 	      _("expression too complex"));
   12892   inst.reloc.type = BFD_RELOC_UNUSED;
   12893   inst.instruction |= (value & 0xf000) >> 12;
   12894   inst.instruction |= (value & 0x0ff0);
   12895   inst.instruction |= (value & 0x000f) << 16;
   12896   /* PR gas/15623: SMC instructions must be last in an IT block.  */
   12897   set_it_insn_type_last ();
   12898 }
   12899 
   12900 static void
   12901 do_t_hvc (void)
   12902 {
   12903   unsigned int value = inst.reloc.exp.X_add_number;
   12904 
   12905   inst.reloc.type = BFD_RELOC_UNUSED;
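  /* The 16-bit HVC immediate is encoded as imm4:imm12
     (instruction bits 19:16 and 11:0).  */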
   12906   inst.instruction |= (value & 0x0fff);
   12907   inst.instruction |= (value & 0xf000) << 4;
   12908 }
   12909 
   12910 static void
   12911 do_t_ssat_usat (int bias)
   12912 {
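  /* SSAT encodes its saturation position as (imm - 1), since its valid
     range is 1..32, whereas USAT encodes the immediate directly (0..31);
     the callers pass the difference as BIAS.  */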
   12913   unsigned Rd, Rn;
   12914 
   12915   Rd = inst.operands[0].reg;
   12916   Rn = inst.operands[2].reg;
   12917 
   12918   reject_bad_reg (Rd);
   12919   reject_bad_reg (Rn);
   12920 
   12921   inst.instruction |= Rd << 8;
   12922   inst.instruction |= inst.operands[1].imm - bias;
   12923   inst.instruction |= Rn << 16;
   12924 
   12925   if (inst.operands[3].present)
   12926     {
   12927       offsetT shift_amount = inst.reloc.exp.X_add_number;
   12928 
   12929       inst.reloc.type = BFD_RELOC_UNUSED;
   12930 
   12931       constraint (inst.reloc.exp.X_op != O_constant,
   12932 		  _("expression too complex"));
   12933 
   12934       if (shift_amount != 0)
   12935 	{
   12936 	  constraint (shift_amount > 31,
   12937 		      _("shift expression is too large"));
   12938 
   12939 	  if (inst.operands[3].shift_kind == SHIFT_ASR)
   12940 	    inst.instruction |= 0x00200000;  /* sh bit.  */
   12941 
   12942 	  inst.instruction |= (shift_amount & 0x1c) << 10;
   12943 	  inst.instruction |= (shift_amount & 0x03) << 6;
   12944 	}
   12945     }
   12946 }
   12947 
   12948 static void
   12949 do_t_ssat (void)
   12950 {
   12951   do_t_ssat_usat (1);
   12952 }
   12953 
   12954 static void
   12955 do_t_ssat16 (void)
   12956 {
   12957   unsigned Rd, Rn;
   12958 
   12959   Rd = inst.operands[0].reg;
   12960   Rn = inst.operands[2].reg;
   12961 
   12962   reject_bad_reg (Rd);
   12963   reject_bad_reg (Rn);
   12964 
   12965   inst.instruction |= Rd << 8;
   12966   inst.instruction |= inst.operands[1].imm - 1;
   12967   inst.instruction |= Rn << 16;
   12968 }
   12969 
   12970 static void
   12971 do_t_strex (void)
   12972 {
   12973   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
   12974 	      || inst.operands[2].postind || inst.operands[2].writeback
   12975 	      || inst.operands[2].immisreg || inst.operands[2].shifted
   12976 	      || inst.operands[2].negative,
   12977 	      BAD_ADDR_MODE);
   12978 
   12979   constraint (inst.operands[2].reg == REG_PC, BAD_PC);
   12980 
   12981   inst.instruction |= inst.operands[0].reg << 8;
   12982   inst.instruction |= inst.operands[1].reg << 12;
   12983   inst.instruction |= inst.operands[2].reg << 16;
   12984   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
   12985 }
   12986 
   12987 static void
   12988 do_t_strexd (void)
   12989 {
   12990   if (!inst.operands[2].present)
   12991     inst.operands[2].reg = inst.operands[1].reg + 1;
   12992 
   12993   constraint (inst.operands[0].reg == inst.operands[1].reg
   12994 	      || inst.operands[0].reg == inst.operands[2].reg
   12995 	      || inst.operands[0].reg == inst.operands[3].reg,
   12996 	      BAD_OVERLAP);
   12997 
   12998   inst.instruction |= inst.operands[0].reg;
   12999   inst.instruction |= inst.operands[1].reg << 12;
   13000   inst.instruction |= inst.operands[2].reg << 8;
   13001   inst.instruction |= inst.operands[3].reg << 16;
   13002 }
   13003 
   13004 static void
   13005 do_t_sxtah (void)
   13006 {
   13007   unsigned Rd, Rn, Rm;
   13008 
   13009   Rd = inst.operands[0].reg;
   13010   Rn = inst.operands[1].reg;
   13011   Rm = inst.operands[2].reg;
   13012 
   13013   reject_bad_reg (Rd);
   13014   reject_bad_reg (Rn);
   13015   reject_bad_reg (Rm);
   13016 
   13017   inst.instruction |= Rd << 8;
   13018   inst.instruction |= Rn << 16;
   13019   inst.instruction |= Rm;
   13020   inst.instruction |= inst.operands[3].imm << 4;
   13021 }
   13022 
   13023 static void
   13024 do_t_sxth (void)
   13025 {
   13026   unsigned Rd, Rm;
   13027 
   13028   Rd = inst.operands[0].reg;
   13029   Rm = inst.operands[1].reg;
   13030 
   13031   reject_bad_reg (Rd);
   13032   reject_bad_reg (Rm);
   13033 
   13034   if (inst.instruction <= 0xffff
   13035       && inst.size_req != 4
   13036       && Rd <= 7 && Rm <= 7
   13037       && (!inst.operands[2].present || inst.operands[2].imm == 0))
   13038     {
   13039       inst.instruction = THUMB_OP16 (inst.instruction);
   13040       inst.instruction |= Rd;
   13041       inst.instruction |= Rm << 3;
   13042     }
   13043   else if (unified_syntax)
   13044     {
   13045       if (inst.instruction <= 0xffff)
   13046 	inst.instruction = THUMB_OP32 (inst.instruction);
   13047       inst.instruction |= Rd << 8;
   13048       inst.instruction |= Rm;
   13049       inst.instruction |= inst.operands[2].imm << 4;
   13050     }
   13051   else
   13052     {
   13053       constraint (inst.operands[2].present && inst.operands[2].imm != 0,
   13054 		  _("Thumb encoding does not support rotation"));
   13055       constraint (1, BAD_HIREG);
   13056     }
   13057 }
   13058 
   13059 static void
   13060 do_t_swi (void)
   13061 {
   13062   /* We have to do the following check manually as ARM_EXT_OS only applies
   13063      to ARM_EXT_V6M.  */
   13064   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
   13065     {
   13066       if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
    13067 	  /* This only applies to the v6m, however, not to later architectures.  */
   13068 	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
   13069 	as_bad (_("SVC is not permitted on this architecture"));
   13070       ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
   13071     }
   13072 
   13073   inst.reloc.type = BFD_RELOC_ARM_SWI;
   13074 }
   13075 
   13076 static void
   13077 do_t_tb (void)
   13078 {
   13079   unsigned Rn, Rm;
   13080   int half;
   13081 
   13082   half = (inst.instruction & 0x10) != 0;
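  /* Bit 4 of the opcode distinguishes TBH ([Rn, Rm, lsl #1]) from
     TBB ([Rn, Rm]); only the halfword form accepts a shifted index.  */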
   13083   set_it_insn_type_last ();
   13084   constraint (inst.operands[0].immisreg,
   13085 	      _("instruction requires register index"));
   13086 
   13087   Rn = inst.operands[0].reg;
   13088   Rm = inst.operands[0].imm;
   13089 
   13090   constraint (Rn == REG_SP, BAD_SP);
   13091   reject_bad_reg (Rm);
   13092 
   13093   constraint (!half && inst.operands[0].shifted,
   13094 	      _("instruction does not allow shifted index"));
   13095   inst.instruction |= (Rn << 16) | Rm;
   13096 }
   13097 
   13098 static void
   13099 do_t_udf (void)
   13100 {
   13101   if (!inst.operands[0].present)
   13102     inst.operands[0].imm = 0;
   13103 
   13104   if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
   13105     {
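      /* The 32-bit UDF immediate is encoded as imm4:imm12
	 (instruction bits 19:16 and 11:0).  */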
   13106       constraint (inst.size_req == 2,
   13107                   _("immediate value out of range"));
   13108       inst.instruction = THUMB_OP32 (inst.instruction);
   13109       inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
   13110       inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
   13111     }
   13112   else
   13113     {
   13114       inst.instruction = THUMB_OP16 (inst.instruction);
   13115       inst.instruction |= inst.operands[0].imm;
   13116     }
   13117 
   13118   set_it_insn_type (NEUTRAL_IT_INSN);
   13119 }
   13120 
   13121 
   13122 static void
   13123 do_t_usat (void)
   13124 {
   13125   do_t_ssat_usat (0);
   13126 }
   13127 
   13128 static void
   13129 do_t_usat16 (void)
   13130 {
   13131   unsigned Rd, Rn;
   13132 
   13133   Rd = inst.operands[0].reg;
   13134   Rn = inst.operands[2].reg;
   13135 
   13136   reject_bad_reg (Rd);
   13137   reject_bad_reg (Rn);
   13138 
   13139   inst.instruction |= Rd << 8;
   13140   inst.instruction |= inst.operands[1].imm;
   13141   inst.instruction |= Rn << 16;
   13142 }
   13143 
   13144 /* Neon instruction encoder helpers.  */
   13145 
   13146 /* Encodings for the different types for various Neon opcodes.  */
   13147 
   13148 /* An "invalid" code for the following tables.  */
   13149 #define N_INV -1u
   13150 
   13151 struct neon_tab_entry
   13152 {
   13153   unsigned integer;
   13154   unsigned float_or_poly;
   13155   unsigned scalar_or_imm;
   13156 };
   13157 
   13158 /* Map overloaded Neon opcodes to their respective encodings.  */
   13159 #define NEON_ENC_TAB					\
   13160   X(vabd,	0x0000700, 0x1200d00, N_INV),		\
   13161   X(vmax,	0x0000600, 0x0000f00, N_INV),		\
   13162   X(vmin,	0x0000610, 0x0200f00, N_INV),		\
   13163   X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
   13164   X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
   13165   X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
   13166   X(vadd,	0x0000800, 0x0000d00, N_INV),		\
   13167   X(vsub,	0x1000800, 0x0200d00, N_INV),		\
   13168   X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
   13169   X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
   13170   X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
   13171   /* Register variants of the following two instructions are encoded as
   13172      vcge / vcgt with the operands reversed.  */  	\
   13173   X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
   13174   X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
   13175   X(vfma,	N_INV, 0x0000c10, N_INV),		\
   13176   X(vfms,	N_INV, 0x0200c10, N_INV),		\
   13177   X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
   13178   X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
   13179   X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
   13180   X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
   13181   X(vmlal,	0x0800800, N_INV,     0x0800240),	\
   13182   X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
   13183   X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
   13184   X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
   13185   X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
   13186   X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
   13187   X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
   13188   X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
   13189   X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
   13190   X(vshl,	0x0000400, N_INV,     0x0800510),	\
   13191   X(vqshl,	0x0000410, N_INV,     0x0800710),	\
   13192   X(vand,	0x0000110, N_INV,     0x0800030),	\
   13193   X(vbic,	0x0100110, N_INV,     0x0800030),	\
   13194   X(veor,	0x1000110, N_INV,     N_INV),		\
   13195   X(vorn,	0x0300110, N_INV,     0x0800010),	\
   13196   X(vorr,	0x0200110, N_INV,     0x0800010),	\
   13197   X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
   13198   X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
   13199   X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
   13200   X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
   13201   X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
   13202   X(vst1,	0x0000000, 0x0800000, N_INV),		\
   13203   X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
   13204   X(vst2,	0x0000100, 0x0800100, N_INV),		\
   13205   X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
   13206   X(vst3,	0x0000200, 0x0800200, N_INV),		\
   13207   X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
   13208   X(vst4,	0x0000300, 0x0800300, N_INV),		\
   13209   X(vmovn,	0x1b20200, N_INV,     N_INV),		\
   13210   X(vtrn,	0x1b20080, N_INV,     N_INV),		\
   13211   X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
   13212   X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
   13213   X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
   13214   X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
   13215   X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
   13216   X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
   13217   X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
   13218   X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
   13219   X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
   13220   X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
   13221   X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
   13222   X(vseleq,	0xe000a00, N_INV,     N_INV),		\
   13223   X(vselvs,	0xe100a00, N_INV,     N_INV),		\
   13224   X(vselge,	0xe200a00, N_INV,     N_INV),		\
   13225   X(vselgt,	0xe300a00, N_INV,     N_INV),		\
   13226   X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
   13227   X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
   13228   X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
   13229   X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
   13230   X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
   13231   X(aes,	0x3b00300, N_INV,     N_INV),		\
   13232   X(sha3op,	0x2000c00, N_INV,     N_INV),		\
   13233   X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
   13234   X(sha2op,     0x3ba0380, N_INV,     N_INV)
   13235 
   13236 enum neon_opc
   13237 {
   13238 #define X(OPC,I,F,S) N_MNEM_##OPC
   13239 NEON_ENC_TAB
   13240 #undef X
   13241 };
   13242 
   13243 static const struct neon_tab_entry neon_enc_tab[] =
   13244 {
   13245 #define X(OPC,I,F,S) { (I), (F), (S) }
   13246 NEON_ENC_TAB
   13247 #undef X
   13248 };
   13249 
   13250 /* Do not use these macros; instead, use NEON_ENCODE defined below.  */
   13251 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
   13252 #define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
   13253 #define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   13254 #define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   13255 #define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   13256 #define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   13257 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
   13258 #define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
   13259 #define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
   13260 #define NEON_ENC_SINGLE_(X) \
   13261   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
   13262 #define NEON_ENC_DOUBLE_(X) \
   13263   ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
   13264 #define NEON_ENC_FPV8_(X) \
   13265   ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
   13266 
   13267 #define NEON_ENCODE(type, inst)					\
   13268   do								\
   13269     {								\
   13270       inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
   13271       inst.is_neon = 1;						\
   13272     }								\
   13273   while (0)
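
          /* For example, NEON_ENCODE (INTEGER, inst) replaces inst.instruction
             (which at this point holds an N_MNEM_* value) with the .integer
             encoding from neon_enc_tab and marks the instruction as Neon;
             NEON_ENCODE (FLOAT, inst) would pick the .float_or_poly column
             instead.  */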
   13274 
   13275 #define check_neon_suffixes						\
   13276   do									\
   13277     {									\
   13278       if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
   13279 	{								\
   13280 	  as_bad (_("invalid neon suffix for non neon instruction"));	\
   13281 	  return;							\
   13282 	}								\
   13283     }									\
   13284   while (0)
   13285 
   13286 /* Define shapes for instruction operands. The following mnemonic characters
   13287    are used in this table:
   13288 
   13289      F - VFP S<n> register
   13290      D - Neon D<n> register
   13291      Q - Neon Q<n> register
   13292      I - Immediate
   13293      S - Scalar
   13294      R - ARM register
    13295      L - D<n> register list
               H - VFP S<n> register holding a half-precision value
   13296 
   13297    This table is used to generate various data:
   13298      - enumerations of the form NS_DDR to be used as arguments to
   13299        neon_select_shape.
    13300      - a table classifying shapes into half, single, double, quad, mixed.
   13301      - a table used to drive neon_select_shape.  */
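          /* For example, the entry X(3, (D, D, D), DOUBLE) below expands to the
             enumeration value NS_DDD, is classified as SC_DOUBLE, and produces
             the neon_shape_tab entry { 3, { SE_D, SE_D, SE_D } }.  */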
   13302 
   13303 #define NEON_SHAPE_DEF			\
   13304   X(3, (D, D, D), DOUBLE),		\
   13305   X(3, (Q, Q, Q), QUAD),		\
   13306   X(3, (D, D, I), DOUBLE),		\
   13307   X(3, (Q, Q, I), QUAD),		\
   13308   X(3, (D, D, S), DOUBLE),		\
   13309   X(3, (Q, Q, S), QUAD),		\
   13310   X(2, (D, D), DOUBLE),			\
   13311   X(2, (Q, Q), QUAD),			\
   13312   X(2, (D, S), DOUBLE),			\
   13313   X(2, (Q, S), QUAD),			\
   13314   X(2, (D, R), DOUBLE),			\
   13315   X(2, (Q, R), QUAD),			\
   13316   X(2, (D, I), DOUBLE),			\
   13317   X(2, (Q, I), QUAD),			\
   13318   X(3, (D, L, D), DOUBLE),		\
   13319   X(2, (D, Q), MIXED),			\
   13320   X(2, (Q, D), MIXED),			\
   13321   X(3, (D, Q, I), MIXED),		\
   13322   X(3, (Q, D, I), MIXED),		\
   13323   X(3, (Q, D, D), MIXED),		\
   13324   X(3, (D, Q, Q), MIXED),		\
   13325   X(3, (Q, Q, D), MIXED),		\
   13326   X(3, (Q, D, S), MIXED),		\
   13327   X(3, (D, Q, S), MIXED),		\
   13328   X(4, (D, D, D, I), DOUBLE),		\
   13329   X(4, (Q, Q, Q, I), QUAD),		\
   13330   X(2, (F, F), SINGLE),			\
   13331   X(3, (F, F, F), SINGLE),		\
   13332   X(2, (F, I), SINGLE),			\
   13333   X(2, (F, D), MIXED),			\
   13334   X(2, (D, F), MIXED),			\
   13335   X(3, (F, F, I), MIXED),		\
   13336   X(4, (R, R, F, F), SINGLE),		\
   13337   X(4, (F, F, R, R), SINGLE),		\
   13338   X(3, (D, R, R), DOUBLE),		\
   13339   X(3, (R, R, D), DOUBLE),		\
   13340   X(2, (S, R), SINGLE),			\
   13341   X(2, (R, S), SINGLE),			\
   13342   X(2, (F, R), SINGLE),			\
   13343   X(2, (R, F), SINGLE),			\
    13344 /* Half-precision float shapes supported so far.  */\
   13345   X (2, (H, D), MIXED),			\
   13346   X (2, (D, H), MIXED),			\
   13347   X (2, (H, F), MIXED),			\
   13348   X (2, (F, H), MIXED),			\
   13349   X (2, (H, H), HALF),			\
   13350   X (2, (H, R), HALF),			\
   13351   X (2, (R, H), HALF),			\
   13352   X (2, (H, I), HALF),			\
   13353   X (3, (H, H, H), HALF),		\
   13354   X (3, (H, F, I), MIXED),		\
   13355   X (3, (F, H, I), MIXED)
   13356 
   13357 #define S2(A,B)		NS_##A##B
   13358 #define S3(A,B,C)	NS_##A##B##C
   13359 #define S4(A,B,C,D)	NS_##A##B##C##D
   13360 
   13361 #define X(N, L, C) S##N L
   13362 
   13363 enum neon_shape
   13364 {
   13365   NEON_SHAPE_DEF,
   13366   NS_NULL
   13367 };
   13368 
   13369 #undef X
   13370 #undef S2
   13371 #undef S3
   13372 #undef S4
   13373 
   13374 enum neon_shape_class
   13375 {
   13376   SC_HALF,
   13377   SC_SINGLE,
   13378   SC_DOUBLE,
   13379   SC_QUAD,
   13380   SC_MIXED
   13381 };
   13382 
   13383 #define X(N, L, C) SC_##C
   13384 
   13385 static enum neon_shape_class neon_shape_class[] =
   13386 {
   13387   NEON_SHAPE_DEF
   13388 };
   13389 
   13390 #undef X
   13391 
   13392 enum neon_shape_el
   13393 {
   13394   SE_H,
   13395   SE_F,
   13396   SE_D,
   13397   SE_Q,
   13398   SE_I,
   13399   SE_S,
   13400   SE_R,
   13401   SE_L
   13402 };
   13403 
    13404 /* Register widths (in bits) of the above.  */
   13405 static unsigned neon_shape_el_size[] =
   13406 {
   13407   16,
   13408   32,
   13409   64,
   13410   128,
   13411   0,
   13412   32,
   13413   32,
   13414   0
   13415 };
   13416 
   13417 struct neon_shape_info
   13418 {
   13419   unsigned els;
   13420   enum neon_shape_el el[NEON_MAX_TYPE_ELS];
   13421 };
   13422 
   13423 #define S2(A,B)		{ SE_##A, SE_##B }
   13424 #define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
   13425 #define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
   13426 
   13427 #define X(N, L, C) { N, S##N L }
   13428 
   13429 static struct neon_shape_info neon_shape_tab[] =
   13430 {
   13431   NEON_SHAPE_DEF
   13432 };
   13433 
   13434 #undef X
   13435 #undef S2
   13436 #undef S3
   13437 #undef S4
   13438 
    13439 /* Bit masks used when type-checking instructions.
    13440    'N_EQK' means the type must be the same as (or in some way derived from) the
    13441    key type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
    13442    set, various other bits can be set as well in order to modify the meaning of
    13443    the type constraint.  */
   13444 
   13445 enum neon_type_mask
   13446 {
   13447   N_S8   = 0x0000001,
   13448   N_S16  = 0x0000002,
   13449   N_S32  = 0x0000004,
   13450   N_S64  = 0x0000008,
   13451   N_U8   = 0x0000010,
   13452   N_U16  = 0x0000020,
   13453   N_U32  = 0x0000040,
   13454   N_U64  = 0x0000080,
   13455   N_I8   = 0x0000100,
   13456   N_I16  = 0x0000200,
   13457   N_I32  = 0x0000400,
   13458   N_I64  = 0x0000800,
   13459   N_8    = 0x0001000,
   13460   N_16   = 0x0002000,
   13461   N_32   = 0x0004000,
   13462   N_64   = 0x0008000,
   13463   N_P8   = 0x0010000,
   13464   N_P16  = 0x0020000,
   13465   N_F16  = 0x0040000,
   13466   N_F32  = 0x0080000,
   13467   N_F64  = 0x0100000,
   13468   N_P64	 = 0x0200000,
   13469   N_KEY  = 0x1000000, /* Key element (main type specifier).  */
   13470   N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
   13471   N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
   13472   N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
   13473   N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
   13474   N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
   13475   N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
   13476   N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
   13477   N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
   13478   N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
   13479   N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
   13480   N_UTYP = 0,
   13481   N_MAX_NONSPECIAL = N_P64
   13482 };
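
          /* For example, an operand given as N_EQK alone must have exactly the
             key's type and size; N_EQK | N_DBL asks for the same base type at
             twice the element size, and N_EQK | N_UNS for the unsigned variant
             of the key's size (see neon_modify_type_size below).  */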
   13483 
   13484 #define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
   13485 
   13486 #define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
   13487 #define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
   13488 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
   13489 #define N_S_32     (N_S8 | N_S16 | N_S32)
   13490 #define N_F_16_32  (N_F16 | N_F32)
   13491 #define N_SUF_32   (N_SU_32 | N_F_16_32)
   13492 #define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
   13493 #define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
   13494 #define N_F_ALL    (N_F16 | N_F32 | N_F64)
   13495 
   13496 /* Pass this as the first type argument to neon_check_type to ignore types
   13497    altogether.  */
   13498 #define N_IGNORE_TYPE (N_KEY | N_EQK)
   13499 
    13500 /* Select a "shape" for the current instruction (describing register types or
    13501    sizes) from a list of alternatives.  Return NS_NULL if the current instruction
    13502    doesn't fit.  For non-polymorphic shapes, checking is usually done as part of
    13503    operand parsing, so this function doesn't need to be called.
    13504    Shapes should be listed in order of decreasing length.  */
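          /* For example, neon_select_shape (NS_DDD, NS_QQQ, NS_NULL), as used by
             many of the three-operand encoders below, returns NS_DDD when all
             three operands are D registers, NS_QQQ when they are all Q registers,
             and otherwise reports "invalid instruction shape" and returns
             NS_NULL.  */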
   13505 
   13506 static enum neon_shape
   13507 neon_select_shape (enum neon_shape shape, ...)
   13508 {
   13509   va_list ap;
   13510   enum neon_shape first_shape = shape;
   13511 
   13512   /* Fix missing optional operands. FIXME: we don't know at this point how
   13513      many arguments we should have, so this makes the assumption that we have
   13514      > 1. This is true of all current Neon opcodes, I think, but may not be
   13515      true in the future.  */
   13516   if (!inst.operands[1].present)
   13517     inst.operands[1] = inst.operands[0];
   13518 
   13519   va_start (ap, shape);
   13520 
   13521   for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
   13522     {
   13523       unsigned j;
   13524       int matches = 1;
   13525 
   13526       for (j = 0; j < neon_shape_tab[shape].els; j++)
   13527 	{
   13528 	  if (!inst.operands[j].present)
   13529 	    {
   13530 	      matches = 0;
   13531 	      break;
   13532 	    }
   13533 
   13534 	  switch (neon_shape_tab[shape].el[j])
   13535 	    {
    13536 	      /* If a .f16, .16, .u16 or .s16 type specifier is given for
    13537 		 a VFP single-precision register operand, it essentially means
    13538 		 that only half of the register is used.
   13539 
    13540 		 If the type specifier is given after the mnemonic, the
    13541 		 information is stored in inst.vectype.  If the type specifier
    13542 		 is given after a register operand, the information is stored
   13543 		 in inst.operands[].vectype.
   13544 
   13545 		 When there is only one type specifier, and all the register
   13546 		 operands are the same type of hardware register, the type
   13547 		 specifier applies to all register operands.
   13548 
   13549 		 If no type specifier is given, the shape is inferred from
   13550 		 operand information.
   13551 
   13552 		 for example:
   13553 		 vadd.f16 s0, s1, s2:		NS_HHH
   13554 		 vabs.f16 s0, s1:		NS_HH
   13555 		 vmov.f16 s0, r1:		NS_HR
   13556 		 vmov.f16 r0, s1:		NS_RH
   13557 		 vcvt.f16 r0, s1:		NS_RH
   13558 		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
   13559 		 vcvt.f16.s32	s2, s2:		NS_HF
   13560 	      */
   13561 	    case SE_H:
   13562 	      if (!(inst.operands[j].isreg
   13563 		    && inst.operands[j].isvec
   13564 		    && inst.operands[j].issingle
   13565 		    && !inst.operands[j].isquad
   13566 		    && ((inst.vectype.elems == 1
   13567 			 && inst.vectype.el[0].size == 16)
   13568 			|| (inst.vectype.elems > 1
   13569 			    && inst.vectype.el[j].size == 16)
   13570 			|| (inst.vectype.elems == 0
   13571 			    && inst.operands[j].vectype.type != NT_invtype
   13572 			    && inst.operands[j].vectype.size == 16))))
   13573 		matches = 0;
   13574 	      break;
   13575 
   13576 	    case SE_F:
   13577 	      if (!(inst.operands[j].isreg
   13578 		    && inst.operands[j].isvec
   13579 		    && inst.operands[j].issingle
   13580 		    && !inst.operands[j].isquad
   13581 		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
   13582 			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
   13583 			|| (inst.vectype.elems == 0
   13584 			    && (inst.operands[j].vectype.size == 32
   13585 				|| inst.operands[j].vectype.type == NT_invtype)))))
   13586 		matches = 0;
   13587 	      break;
   13588 
   13589 	    case SE_D:
   13590 	      if (!(inst.operands[j].isreg
   13591 		    && inst.operands[j].isvec
   13592 		    && !inst.operands[j].isquad
   13593 		    && !inst.operands[j].issingle))
   13594 		matches = 0;
   13595 	      break;
   13596 
   13597 	    case SE_R:
   13598 	      if (!(inst.operands[j].isreg
   13599 		    && !inst.operands[j].isvec))
   13600 		matches = 0;
   13601 	      break;
   13602 
   13603 	    case SE_Q:
   13604 	      if (!(inst.operands[j].isreg
   13605 		    && inst.operands[j].isvec
   13606 		    && inst.operands[j].isquad
   13607 		    && !inst.operands[j].issingle))
   13608 		matches = 0;
   13609 	      break;
   13610 
   13611 	    case SE_I:
   13612 	      if (!(!inst.operands[j].isreg
   13613 		    && !inst.operands[j].isscalar))
   13614 		matches = 0;
   13615 	      break;
   13616 
   13617 	    case SE_S:
   13618 	      if (!(!inst.operands[j].isreg
   13619 		    && inst.operands[j].isscalar))
   13620 		matches = 0;
   13621 	      break;
   13622 
   13623 	    case SE_L:
   13624 	      break;
   13625 	    }
   13626 	  if (!matches)
   13627 	    break;
   13628 	}
   13629       if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
   13630 	/* We've matched all the entries in the shape table, and we don't
   13631 	   have any left over operands which have not been matched.  */
   13632 	break;
   13633     }
   13634 
   13635   va_end (ap);
   13636 
   13637   if (shape == NS_NULL && first_shape != NS_NULL)
   13638     first_error (_("invalid instruction shape"));
   13639 
   13640   return shape;
   13641 }
   13642 
   13643 /* True if SHAPE is predominantly a quadword operation (most of the time, this
   13644    means the Q bit should be set).  */
   13645 
   13646 static int
   13647 neon_quad (enum neon_shape shape)
   13648 {
   13649   return neon_shape_class[shape] == SC_QUAD;
   13650 }
   13651 
   13652 static void
   13653 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
   13654 		       unsigned *g_size)
   13655 {
    13656   /* Allow modifications to types which are constrained to be based on the
    13657      key element, according to the bits set alongside N_EQK.  */
   13658   if ((typebits & N_EQK) != 0)
   13659     {
   13660       if ((typebits & N_HLF) != 0)
   13661 	*g_size /= 2;
   13662       else if ((typebits & N_DBL) != 0)
   13663 	*g_size *= 2;
   13664       if ((typebits & N_SGN) != 0)
   13665 	*g_type = NT_signed;
   13666       else if ((typebits & N_UNS) != 0)
   13667 	*g_type = NT_unsigned;
   13668       else if ((typebits & N_INT) != 0)
   13669 	*g_type = NT_integer;
   13670       else if ((typebits & N_FLT) != 0)
   13671 	*g_type = NT_float;
   13672       else if ((typebits & N_SIZ) != 0)
   13673 	*g_type = NT_untyped;
   13674     }
   13675 }
   13676 
    13677 /* Return a copy of KEY promoted according to the bits set in THISARG.  KEY
    13678    should be the "key" operand type, i.e. the single type specified in a Neon
    13679    instruction when it is the only one given.  */
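          /* For example, promoting a key of { NT_signed, 32 } with
             THISARG == (N_EQK | N_DBL) yields { NT_signed, 64 }.  */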
   13680 
   13681 static struct neon_type_el
   13682 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
   13683 {
   13684   struct neon_type_el dest = *key;
   13685 
   13686   gas_assert ((thisarg & N_EQK) != 0);
   13687 
   13688   neon_modify_type_size (thisarg, &dest.type, &dest.size);
   13689 
   13690   return dest;
   13691 }
   13692 
   13693 /* Convert Neon type and size into compact bitmask representation.  */
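          /* For example, (NT_signed, 32) maps to N_S32 and (NT_float, 16) to
             N_F16; combinations with no corresponding bit yield N_UTYP (0).  */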
   13694 
   13695 static enum neon_type_mask
   13696 type_chk_of_el_type (enum neon_el_type type, unsigned size)
   13697 {
   13698   switch (type)
   13699     {
   13700     case NT_untyped:
   13701       switch (size)
   13702 	{
   13703 	case 8:  return N_8;
   13704 	case 16: return N_16;
   13705 	case 32: return N_32;
   13706 	case 64: return N_64;
   13707 	default: ;
   13708 	}
   13709       break;
   13710 
   13711     case NT_integer:
   13712       switch (size)
   13713 	{
   13714 	case 8:  return N_I8;
   13715 	case 16: return N_I16;
   13716 	case 32: return N_I32;
   13717 	case 64: return N_I64;
   13718 	default: ;
   13719 	}
   13720       break;
   13721 
   13722     case NT_float:
   13723       switch (size)
   13724 	{
   13725 	case 16: return N_F16;
   13726 	case 32: return N_F32;
   13727 	case 64: return N_F64;
   13728 	default: ;
   13729 	}
   13730       break;
   13731 
   13732     case NT_poly:
   13733       switch (size)
   13734 	{
   13735 	case 8:  return N_P8;
   13736 	case 16: return N_P16;
   13737 	case 64: return N_P64;
   13738 	default: ;
   13739 	}
   13740       break;
   13741 
   13742     case NT_signed:
   13743       switch (size)
   13744 	{
   13745 	case 8:  return N_S8;
   13746 	case 16: return N_S16;
   13747 	case 32: return N_S32;
   13748 	case 64: return N_S64;
   13749 	default: ;
   13750 	}
   13751       break;
   13752 
   13753     case NT_unsigned:
   13754       switch (size)
   13755 	{
   13756 	case 8:  return N_U8;
   13757 	case 16: return N_U16;
   13758 	case 32: return N_U32;
   13759 	case 64: return N_U64;
   13760 	default: ;
   13761 	}
   13762       break;
   13763 
   13764     default: ;
   13765     }
   13766 
   13767   return N_UTYP;
   13768 }
   13769 
   13770 /* Convert compact Neon bitmask type representation to a type and size. Only
   13771    handles the case where a single bit is set in the mask.  */
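          /* For example, N_U16 yields (NT_unsigned, 16); a mask with N_EQK set,
             or with no recognised size bit, makes the function return FAIL.  */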
   13772 
   13773 static int
   13774 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
   13775 		     enum neon_type_mask mask)
   13776 {
   13777   if ((mask & N_EQK) != 0)
   13778     return FAIL;
   13779 
   13780   if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
   13781     *size = 8;
   13782   else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
   13783     *size = 16;
   13784   else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
   13785     *size = 32;
   13786   else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
   13787     *size = 64;
   13788   else
   13789     return FAIL;
   13790 
   13791   if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
   13792     *type = NT_signed;
   13793   else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
   13794     *type = NT_unsigned;
   13795   else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
   13796     *type = NT_integer;
   13797   else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
   13798     *type = NT_untyped;
   13799   else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
   13800     *type = NT_poly;
   13801   else if ((mask & (N_F_ALL)) != 0)
   13802     *type = NT_float;
   13803   else
   13804     return FAIL;
   13805 
   13806   return SUCCESS;
   13807 }
   13808 
   13809 /* Modify a bitmask of allowed types. This is only needed for type
   13810    relaxation.  */
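          /* For example, modify_types_allowed (N_S8 | N_S16, N_EQK | N_DBL)
             returns N_S16 | N_S32, i.e. each allowed element type doubled in
             size.  */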
   13811 
   13812 static unsigned
   13813 modify_types_allowed (unsigned allowed, unsigned mods)
   13814 {
   13815   unsigned size;
   13816   enum neon_el_type type;
   13817   unsigned destmask;
   13818   int i;
   13819 
   13820   destmask = 0;
   13821 
   13822   for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
   13823     {
   13824       if (el_type_of_type_chk (&type, &size,
   13825 			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
   13826 	{
   13827 	  neon_modify_type_size (mods, &type, &size);
   13828 	  destmask |= type_chk_of_el_type (type, size);
   13829 	}
   13830     }
   13831 
   13832   return destmask;
   13833 }
   13834 
   13835 /* Check type and return type classification.
   13836    The manual states (paraphrase): If one datatype is given, it indicates the
   13837    type given in:
   13838     - the second operand, if there is one
   13839     - the operand, if there is no second operand
   13840     - the result, if there are no operands.
    13841    This isn't quite good enough though, so we use the concept of a "key"
    13842    datatype, set on a per-instruction basis, which is the type that matters
    13843    when only one data type is written.
   13844    Note: this function has side-effects (e.g. filling in missing operands). All
   13845    Neon instructions should call it before performing bit encoding.  */
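          /* For example, do_neon_dyadic_i_su below calls
             neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY): the last
             operand is the key and must be one of the signed or unsigned
             8/16/32-bit types, the other two operands must match its type and
             size, and the key's type and size are returned so that the caller
             can set the U bit and the size field.  */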
   13846 
   13847 static struct neon_type_el
   13848 neon_check_type (unsigned els, enum neon_shape ns, ...)
   13849 {
   13850   va_list ap;
   13851   unsigned i, pass, key_el = 0;
   13852   unsigned types[NEON_MAX_TYPE_ELS];
   13853   enum neon_el_type k_type = NT_invtype;
   13854   unsigned k_size = -1u;
   13855   struct neon_type_el badtype = {NT_invtype, -1};
   13856   unsigned key_allowed = 0;
   13857 
    13858   /* The optional register operand in a Neon instruction, if any, is always
    13859      operand 1.  Fill in the missing operand here, if it was omitted.  */
   13860   if (els > 1 && !inst.operands[1].present)
   13861     inst.operands[1] = inst.operands[0];
   13862 
   13863   /* Suck up all the varargs.  */
   13864   va_start (ap, ns);
   13865   for (i = 0; i < els; i++)
   13866     {
   13867       unsigned thisarg = va_arg (ap, unsigned);
   13868       if (thisarg == N_IGNORE_TYPE)
   13869 	{
   13870 	  va_end (ap);
   13871 	  return badtype;
   13872 	}
   13873       types[i] = thisarg;
   13874       if ((thisarg & N_KEY) != 0)
   13875 	key_el = i;
   13876     }
   13877   va_end (ap);
   13878 
   13879   if (inst.vectype.elems > 0)
   13880     for (i = 0; i < els; i++)
   13881       if (inst.operands[i].vectype.type != NT_invtype)
   13882 	{
   13883 	  first_error (_("types specified in both the mnemonic and operands"));
   13884 	  return badtype;
   13885 	}
   13886 
   13887   /* Duplicate inst.vectype elements here as necessary.
   13888      FIXME: No idea if this is exactly the same as the ARM assembler,
   13889      particularly when an insn takes one register and one non-register
   13890      operand. */
   13891   if (inst.vectype.elems == 1 && els > 1)
   13892     {
   13893       unsigned j;
   13894       inst.vectype.elems = els;
   13895       inst.vectype.el[key_el] = inst.vectype.el[0];
   13896       for (j = 0; j < els; j++)
   13897 	if (j != key_el)
   13898 	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
   13899 						  types[j]);
   13900     }
   13901   else if (inst.vectype.elems == 0 && els > 0)
   13902     {
   13903       unsigned j;
   13904       /* No types were given after the mnemonic, so look for types specified
   13905 	 after each operand. We allow some flexibility here; as long as the
   13906 	 "key" operand has a type, we can infer the others.  */
   13907       for (j = 0; j < els; j++)
   13908 	if (inst.operands[j].vectype.type != NT_invtype)
   13909 	  inst.vectype.el[j] = inst.operands[j].vectype;
   13910 
   13911       if (inst.operands[key_el].vectype.type != NT_invtype)
   13912 	{
   13913 	  for (j = 0; j < els; j++)
   13914 	    if (inst.operands[j].vectype.type == NT_invtype)
   13915 	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
   13916 						      types[j]);
   13917 	}
   13918       else
   13919 	{
   13920 	  first_error (_("operand types can't be inferred"));
   13921 	  return badtype;
   13922 	}
   13923     }
   13924   else if (inst.vectype.elems != els)
   13925     {
   13926       first_error (_("type specifier has the wrong number of parts"));
   13927       return badtype;
   13928     }
   13929 
   13930   for (pass = 0; pass < 2; pass++)
   13931     {
   13932       for (i = 0; i < els; i++)
   13933 	{
   13934 	  unsigned thisarg = types[i];
   13935 	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
   13936 	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
   13937 	  enum neon_el_type g_type = inst.vectype.el[i].type;
   13938 	  unsigned g_size = inst.vectype.el[i].size;
   13939 
   13940 	  /* Decay more-specific signed & unsigned types to sign-insensitive
   13941 	     integer types if sign-specific variants are unavailable.  */
   13942 	  if ((g_type == NT_signed || g_type == NT_unsigned)
   13943 	      && (types_allowed & N_SU_ALL) == 0)
   13944 	    g_type = NT_integer;
   13945 
   13946 	  /* If only untyped args are allowed, decay any more specific types to
   13947 	     them. Some instructions only care about signs for some element
   13948 	     sizes, so handle that properly.  */
   13949 	  if (((types_allowed & N_UNT) == 0)
   13950 	      && ((g_size == 8 && (types_allowed & N_8) != 0)
   13951 		  || (g_size == 16 && (types_allowed & N_16) != 0)
   13952 		  || (g_size == 32 && (types_allowed & N_32) != 0)
   13953 		  || (g_size == 64 && (types_allowed & N_64) != 0)))
   13954 	    g_type = NT_untyped;
   13955 
   13956 	  if (pass == 0)
   13957 	    {
   13958 	      if ((thisarg & N_KEY) != 0)
   13959 		{
   13960 		  k_type = g_type;
   13961 		  k_size = g_size;
   13962 		  key_allowed = thisarg & ~N_KEY;
   13963 
   13964 		  /* Check architecture constraint on FP16 extension.  */
   13965 		  if (k_size == 16
   13966 		      && k_type == NT_float
   13967 		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
   13968 		    {
   13969 		      inst.error = _(BAD_FP16);
   13970 		      return badtype;
   13971 		    }
   13972 		}
   13973 	    }
   13974 	  else
   13975 	    {
   13976 	      if ((thisarg & N_VFP) != 0)
   13977 		{
   13978 		  enum neon_shape_el regshape;
   13979 		  unsigned regwidth, match;
   13980 
   13981 		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
   13982 		  if (ns == NS_NULL)
   13983 		    {
   13984 		      first_error (_("invalid instruction shape"));
   13985 		      return badtype;
   13986 		    }
   13987 		  regshape = neon_shape_tab[ns].el[i];
   13988 		  regwidth = neon_shape_el_size[regshape];
   13989 
   13990 		  /* In VFP mode, operands must match register widths. If we
   13991 		     have a key operand, use its width, else use the width of
   13992 		     the current operand.  */
   13993 		  if (k_size != -1u)
   13994 		    match = k_size;
   13995 		  else
   13996 		    match = g_size;
   13997 
   13998 		  /* FP16 will use a single precision register.  */
   13999 		  if (regwidth == 32 && match == 16)
   14000 		    {
   14001 		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
   14002 			match = regwidth;
   14003 		      else
   14004 			{
   14005 			  inst.error = _(BAD_FP16);
   14006 			  return badtype;
   14007 			}
   14008 		    }
   14009 
   14010 		  if (regwidth != match)
   14011 		    {
   14012 		      first_error (_("operand size must match register width"));
   14013 		      return badtype;
   14014 		    }
   14015 		}
   14016 
   14017 	      if ((thisarg & N_EQK) == 0)
   14018 		{
   14019 		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
   14020 
   14021 		  if ((given_type & types_allowed) == 0)
   14022 		    {
   14023 		      first_error (_("bad type in Neon instruction"));
   14024 		      return badtype;
   14025 		    }
   14026 		}
   14027 	      else
   14028 		{
   14029 		  enum neon_el_type mod_k_type = k_type;
   14030 		  unsigned mod_k_size = k_size;
   14031 		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
   14032 		  if (g_type != mod_k_type || g_size != mod_k_size)
   14033 		    {
   14034 		      first_error (_("inconsistent types in Neon instruction"));
   14035 		      return badtype;
   14036 		    }
   14037 		}
   14038 	    }
   14039 	}
   14040     }
   14041 
   14042   return inst.vectype.el[key_el];
   14043 }
   14044 
   14045 /* Neon-style VFP instruction forwarding.  */
   14046 
   14047 /* Thumb VFP instructions have 0xE in the condition field.  */
   14048 
   14049 static void
   14050 do_vfp_cond_or_thumb (void)
   14051 {
   14052   inst.is_neon = 1;
   14053 
   14054   if (thumb_mode)
   14055     inst.instruction |= 0xe0000000;
   14056   else
   14057     inst.instruction |= inst.cond << 28;
   14058 }
   14059 
   14060 /* Look up and encode a simple mnemonic, for use as a helper function for the
   14061    Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   14062    etc.  It is assumed that operand parsing has already been done, and that the
   14063    operands are in the form expected by the given opcode (this isn't necessarily
   14064    the same as the form in which they were parsed, hence some massaging must
   14065    take place before this function is called).
   14066    Checks current arch version against that in the looked-up opcode.  */
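          /* For example, do_vfp_nsyn_add_sub below passes "fadds" or "faddd"
             here; the encoding is taken from the normal opcode table entry for
             that mnemonic and finished off by its own tencode/aencode
             handler.  */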
   14067 
   14068 static void
   14069 do_vfp_nsyn_opcode (const char *opname)
   14070 {
   14071   const struct asm_opcode *opcode;
   14072 
   14073   opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
   14074 
   14075   if (!opcode)
   14076     abort ();
   14077 
   14078   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
   14079 		thumb_mode ? *opcode->tvariant : *opcode->avariant),
   14080 	      _(BAD_FPU));
   14081 
   14082   inst.is_neon = 1;
   14083 
   14084   if (thumb_mode)
   14085     {
   14086       inst.instruction = opcode->tvalue;
   14087       opcode->tencode ();
   14088     }
   14089   else
   14090     {
   14091       inst.instruction = (inst.cond << 28) | opcode->avalue;
   14092       opcode->aencode ();
   14093     }
   14094 }
   14095 
   14096 static void
   14097 do_vfp_nsyn_add_sub (enum neon_shape rs)
   14098 {
   14099   int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
   14100 
   14101   if (rs == NS_FFF || rs == NS_HHH)
   14102     {
   14103       if (is_add)
   14104 	do_vfp_nsyn_opcode ("fadds");
   14105       else
   14106 	do_vfp_nsyn_opcode ("fsubs");
   14107 
   14108       /* ARMv8.2 fp16 instruction.  */
   14109       if (rs == NS_HHH)
   14110 	do_scalar_fp16_v82_encode ();
   14111     }
   14112   else
   14113     {
   14114       if (is_add)
   14115 	do_vfp_nsyn_opcode ("faddd");
   14116       else
   14117 	do_vfp_nsyn_opcode ("fsubd");
   14118     }
   14119 }
   14120 
   14121 /* Check operand types to see if this is a VFP instruction, and if so call
   14122    PFN ().  */
   14123 
   14124 static int
   14125 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
   14126 {
   14127   enum neon_shape rs;
   14128   struct neon_type_el et;
   14129 
   14130   switch (args)
   14131     {
   14132     case 2:
   14133       rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
   14134       et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
   14135       break;
   14136 
   14137     case 3:
   14138       rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
   14139       et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   14140 			    N_F_ALL | N_KEY | N_VFP);
   14141       break;
   14142 
   14143     default:
   14144       abort ();
   14145     }
   14146 
   14147   if (et.type != NT_invtype)
   14148     {
   14149       pfn (rs);
   14150       return SUCCESS;
   14151     }
   14152 
   14153   inst.error = NULL;
   14154   return FAIL;
   14155 }
   14156 
   14157 static void
   14158 do_vfp_nsyn_mla_mls (enum neon_shape rs)
   14159 {
   14160   int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
   14161 
   14162   if (rs == NS_FFF || rs == NS_HHH)
   14163     {
   14164       if (is_mla)
   14165 	do_vfp_nsyn_opcode ("fmacs");
   14166       else
   14167 	do_vfp_nsyn_opcode ("fnmacs");
   14168 
   14169       /* ARMv8.2 fp16 instruction.  */
   14170       if (rs == NS_HHH)
   14171 	do_scalar_fp16_v82_encode ();
   14172     }
   14173   else
   14174     {
   14175       if (is_mla)
   14176 	do_vfp_nsyn_opcode ("fmacd");
   14177       else
   14178 	do_vfp_nsyn_opcode ("fnmacd");
   14179     }
   14180 }
   14181 
   14182 static void
   14183 do_vfp_nsyn_fma_fms (enum neon_shape rs)
   14184 {
   14185   int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
   14186 
   14187   if (rs == NS_FFF || rs == NS_HHH)
   14188     {
   14189       if (is_fma)
   14190 	do_vfp_nsyn_opcode ("ffmas");
   14191       else
   14192 	do_vfp_nsyn_opcode ("ffnmas");
   14193 
   14194       /* ARMv8.2 fp16 instruction.  */
   14195       if (rs == NS_HHH)
   14196 	do_scalar_fp16_v82_encode ();
   14197     }
   14198   else
   14199     {
   14200       if (is_fma)
   14201 	do_vfp_nsyn_opcode ("ffmad");
   14202       else
   14203 	do_vfp_nsyn_opcode ("ffnmad");
   14204     }
   14205 }
   14206 
   14207 static void
   14208 do_vfp_nsyn_mul (enum neon_shape rs)
   14209 {
   14210   if (rs == NS_FFF || rs == NS_HHH)
   14211     {
   14212       do_vfp_nsyn_opcode ("fmuls");
   14213 
   14214       /* ARMv8.2 fp16 instruction.  */
   14215       if (rs == NS_HHH)
   14216 	do_scalar_fp16_v82_encode ();
   14217     }
   14218   else
   14219     do_vfp_nsyn_opcode ("fmuld");
   14220 }
   14221 
   14222 static void
   14223 do_vfp_nsyn_abs_neg (enum neon_shape rs)
   14224 {
   14225   int is_neg = (inst.instruction & 0x80) != 0;
   14226   neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
   14227 
   14228   if (rs == NS_FF || rs == NS_HH)
   14229     {
   14230       if (is_neg)
   14231 	do_vfp_nsyn_opcode ("fnegs");
   14232       else
   14233 	do_vfp_nsyn_opcode ("fabss");
   14234 
   14235       /* ARMv8.2 fp16 instruction.  */
   14236       if (rs == NS_HH)
   14237 	do_scalar_fp16_v82_encode ();
   14238     }
   14239   else
   14240     {
   14241       if (is_neg)
   14242 	do_vfp_nsyn_opcode ("fnegd");
   14243       else
   14244 	do_vfp_nsyn_opcode ("fabsd");
   14245     }
   14246 }
   14247 
   14248 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
   14249    insns belong to Neon, and are handled elsewhere.  */
   14250 
   14251 static void
   14252 do_vfp_nsyn_ldm_stm (int is_dbmode)
   14253 {
   14254   int is_ldm = (inst.instruction & (1 << 20)) != 0;
   14255   if (is_ldm)
   14256     {
   14257       if (is_dbmode)
   14258 	do_vfp_nsyn_opcode ("fldmdbs");
   14259       else
   14260 	do_vfp_nsyn_opcode ("fldmias");
   14261     }
   14262   else
   14263     {
   14264       if (is_dbmode)
   14265 	do_vfp_nsyn_opcode ("fstmdbs");
   14266       else
   14267 	do_vfp_nsyn_opcode ("fstmias");
   14268     }
   14269 }
   14270 
   14271 static void
   14272 do_vfp_nsyn_sqrt (void)
   14273 {
   14274   enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
   14275   neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
   14276 
   14277   if (rs == NS_FF || rs == NS_HH)
   14278     {
   14279       do_vfp_nsyn_opcode ("fsqrts");
   14280 
   14281       /* ARMv8.2 fp16 instruction.  */
   14282       if (rs == NS_HH)
   14283 	do_scalar_fp16_v82_encode ();
   14284     }
   14285   else
   14286     do_vfp_nsyn_opcode ("fsqrtd");
   14287 }
   14288 
   14289 static void
   14290 do_vfp_nsyn_div (void)
   14291 {
   14292   enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
   14293   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   14294 		   N_F_ALL | N_KEY | N_VFP);
   14295 
   14296   if (rs == NS_FFF || rs == NS_HHH)
   14297     {
   14298       do_vfp_nsyn_opcode ("fdivs");
   14299 
   14300       /* ARMv8.2 fp16 instruction.  */
   14301       if (rs == NS_HHH)
   14302 	do_scalar_fp16_v82_encode ();
   14303     }
   14304   else
   14305     do_vfp_nsyn_opcode ("fdivd");
   14306 }
   14307 
   14308 static void
   14309 do_vfp_nsyn_nmul (void)
   14310 {
   14311   enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
   14312   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
   14313 		   N_F_ALL | N_KEY | N_VFP);
   14314 
   14315   if (rs == NS_FFF || rs == NS_HHH)
   14316     {
   14317       NEON_ENCODE (SINGLE, inst);
   14318       do_vfp_sp_dyadic ();
   14319 
   14320       /* ARMv8.2 fp16 instruction.  */
   14321       if (rs == NS_HHH)
   14322 	do_scalar_fp16_v82_encode ();
   14323     }
   14324   else
   14325     {
   14326       NEON_ENCODE (DOUBLE, inst);
   14327       do_vfp_dp_rd_rn_rm ();
   14328     }
   14329   do_vfp_cond_or_thumb ();
   14330 
   14331 }
   14332 
   14333 static void
   14334 do_vfp_nsyn_cmp (void)
   14335 {
   14336   enum neon_shape rs;
   14337   if (inst.operands[1].isreg)
   14338     {
   14339       rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
   14340       neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
   14341 
   14342       if (rs == NS_FF || rs == NS_HH)
   14343 	{
   14344 	  NEON_ENCODE (SINGLE, inst);
   14345 	  do_vfp_sp_monadic ();
   14346 	}
   14347       else
   14348 	{
   14349 	  NEON_ENCODE (DOUBLE, inst);
   14350 	  do_vfp_dp_rd_rm ();
   14351 	}
   14352     }
   14353   else
   14354     {
   14355       rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
   14356       neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
   14357 
   14358       switch (inst.instruction & 0x0fffffff)
   14359 	{
   14360 	case N_MNEM_vcmp:
   14361 	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
   14362 	  break;
   14363 	case N_MNEM_vcmpe:
   14364 	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
   14365 	  break;
   14366 	default:
   14367 	  abort ();
   14368 	}
   14369 
   14370       if (rs == NS_FI || rs == NS_HI)
   14371 	{
   14372 	  NEON_ENCODE (SINGLE, inst);
   14373 	  do_vfp_sp_compare_z ();
   14374 	}
   14375       else
   14376 	{
   14377 	  NEON_ENCODE (DOUBLE, inst);
   14378 	  do_vfp_dp_rd ();
   14379 	}
   14380     }
   14381   do_vfp_cond_or_thumb ();
   14382 
   14383   /* ARMv8.2 fp16 instruction.  */
   14384   if (rs == NS_HI || rs == NS_HH)
   14385     do_scalar_fp16_v82_encode ();
   14386 }
   14387 
   14388 static void
   14389 nsyn_insert_sp (void)
   14390 {
   14391   inst.operands[1] = inst.operands[0];
   14392   memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
   14393   inst.operands[0].reg = REG_SP;
   14394   inst.operands[0].isreg = 1;
   14395   inst.operands[0].writeback = 1;
   14396   inst.operands[0].present = 1;
   14397 }
   14398 
   14399 static void
   14400 do_vfp_nsyn_push (void)
   14401 {
   14402   nsyn_insert_sp ();
   14403   if (inst.operands[1].issingle)
   14404     do_vfp_nsyn_opcode ("fstmdbs");
   14405   else
   14406     do_vfp_nsyn_opcode ("fstmdbd");
   14407 }
   14408 
   14409 static void
   14410 do_vfp_nsyn_pop (void)
   14411 {
   14412   nsyn_insert_sp ();
   14413   if (inst.operands[1].issingle)
   14414     do_vfp_nsyn_opcode ("fldmias");
   14415   else
   14416     do_vfp_nsyn_opcode ("fldmiad");
   14417 }
   14418 
   14419 /* Fix up Neon data-processing instructions, ORing in the correct bits for
   14420    ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
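          /* For example, in ARM mode the top byte of the encoding becomes 0xf2
             or 0xf3 (unconditional Neon data-processing); in Thumb mode it
             becomes 0xef or 0xff, with the U bit relocated from bit 24 to
             bit 28.  */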
   14421 
   14422 static void
   14423 neon_dp_fixup (struct arm_it* insn)
   14424 {
   14425   unsigned int i = insn->instruction;
   14426   insn->is_neon = 1;
   14427 
   14428   if (thumb_mode)
   14429     {
   14430       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
   14431       if (i & (1 << 24))
   14432 	i |= 1 << 28;
   14433 
   14434       i &= ~(1 << 24);
   14435 
   14436       i |= 0xef000000;
   14437     }
   14438   else
   14439     i |= 0xf2000000;
   14440 
   14441   insn->instruction = i;
   14442 }
   14443 
   14444 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   14445    (0, 1, 2, 3).  */
   14446 
   14447 static unsigned
   14448 neon_logbits (unsigned x)
   14449 {
   14450   return ffs (x) - 4;
   14451 }
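
          /* For example, neon_logbits (8) == 0 and neon_logbits (64) == 3;
             ffs () returns the one-based index of the least significant set
             bit.  */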
   14452 
   14453 #define LOW4(R) ((R) & 0xf)
   14454 #define HI1(R) (((R) >> 4) & 1)
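
          /* LOW4 and HI1 split a 5-bit D-register number into the 4-bit field
             and the extra "hi" bit that the encodings store separately: for d17
             (register number 17), LOW4 gives 1 and HI1 gives 1, placed in e.g.
             bits 12-15 and bit 22 for a destination register.  */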
   14455 
   14456 /* Encode insns with bit pattern:
   14457 
   14458   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   14459   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
   14460 
    14461   SIZE is passed in bits.  -1 means the size field isn't changed, in case it
    14462   has a different meaning for some instructions.  */
   14463 
   14464 static void
   14465 neon_three_same (int isquad, int ubit, int size)
   14466 {
   14467   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14468   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14469   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   14470   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   14471   inst.instruction |= LOW4 (inst.operands[2].reg);
   14472   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   14473   inst.instruction |= (isquad != 0) << 6;
   14474   inst.instruction |= (ubit != 0) << 24;
   14475   if (size != -1)
   14476     inst.instruction |= neon_logbits (size) << 20;
   14477 
   14478   neon_dp_fixup (&inst);
   14479 }
   14480 
   14481 /* Encode instructions of the form:
   14482 
   14483   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
   14484   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
   14485 
   14486   Don't write size if SIZE == -1.  */
   14487 
   14488 static void
   14489 neon_two_same (int qbit, int ubit, int size)
   14490 {
   14491   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14492   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14493   inst.instruction |= LOW4 (inst.operands[1].reg);
   14494   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14495   inst.instruction |= (qbit != 0) << 6;
   14496   inst.instruction |= (ubit != 0) << 24;
   14497 
   14498   if (size != -1)
   14499     inst.instruction |= neon_logbits (size) << 18;
   14500 
   14501   neon_dp_fixup (&inst);
   14502 }
   14503 
   14504 /* Neon instruction encoders, in approximate order of appearance.  */
   14505 
   14506 static void
   14507 do_neon_dyadic_i_su (void)
   14508 {
   14509   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14510   struct neon_type_el et = neon_check_type (3, rs,
   14511     N_EQK, N_EQK, N_SU_32 | N_KEY);
   14512   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14513 }
   14514 
   14515 static void
   14516 do_neon_dyadic_i64_su (void)
   14517 {
   14518   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14519   struct neon_type_el et = neon_check_type (3, rs,
   14520     N_EQK, N_EQK, N_SU_ALL | N_KEY);
   14521   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14522 }
   14523 
   14524 static void
   14525 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
   14526 		unsigned immbits)
   14527 {
   14528   unsigned size = et.size >> 3;
   14529   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14530   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14531   inst.instruction |= LOW4 (inst.operands[1].reg);
   14532   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14533   inst.instruction |= (isquad != 0) << 6;
   14534   inst.instruction |= immbits << 16;
   14535   inst.instruction |= (size >> 3) << 7;
   14536   inst.instruction |= (size & 0x7) << 19;
   14537   if (write_ubit)
   14538     inst.instruction |= (uval != 0) << 24;
   14539 
   14540   neon_dp_fixup (&inst);
   14541 }
   14542 
   14543 static void
   14544 do_neon_shl_imm (void)
   14545 {
   14546   if (!inst.operands[2].isreg)
   14547     {
   14548       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14549       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
   14550       int imm = inst.operands[2].imm;
   14551 
   14552       constraint (imm < 0 || (unsigned)imm >= et.size,
   14553 		  _("immediate out of range for shift"));
   14554       NEON_ENCODE (IMMED, inst);
   14555       neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   14556     }
   14557   else
   14558     {
   14559       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14560       struct neon_type_el et = neon_check_type (3, rs,
   14561 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
   14562       unsigned int tmp;
   14563 
   14564       /* VSHL/VQSHL 3-register variants have syntax such as:
   14565 	   vshl.xx Dd, Dm, Dn
   14566 	 whereas other 3-register operations encoded by neon_three_same have
   14567 	 syntax like:
   14568 	   vadd.xx Dd, Dn, Dm
   14569 	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
   14570 	 here.  */
   14571       tmp = inst.operands[2].reg;
   14572       inst.operands[2].reg = inst.operands[1].reg;
   14573       inst.operands[1].reg = tmp;
   14574       NEON_ENCODE (INTEGER, inst);
   14575       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14576     }
   14577 }
   14578 
   14579 static void
   14580 do_neon_qshl_imm (void)
   14581 {
   14582   if (!inst.operands[2].isreg)
   14583     {
   14584       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14585       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
   14586       int imm = inst.operands[2].imm;
   14587 
   14588       constraint (imm < 0 || (unsigned)imm >= et.size,
   14589 		  _("immediate out of range for shift"));
   14590       NEON_ENCODE (IMMED, inst);
   14591       neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
   14592     }
   14593   else
   14594     {
   14595       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14596       struct neon_type_el et = neon_check_type (3, rs,
   14597 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
   14598       unsigned int tmp;
   14599 
   14600       /* See note in do_neon_shl_imm.  */
   14601       tmp = inst.operands[2].reg;
   14602       inst.operands[2].reg = inst.operands[1].reg;
   14603       inst.operands[1].reg = tmp;
   14604       NEON_ENCODE (INTEGER, inst);
   14605       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14606     }
   14607 }
   14608 
   14609 static void
   14610 do_neon_rshl (void)
   14611 {
   14612   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14613   struct neon_type_el et = neon_check_type (3, rs,
   14614     N_EQK, N_EQK, N_SU_ALL | N_KEY);
   14615   unsigned int tmp;
   14616 
   14617   tmp = inst.operands[2].reg;
   14618   inst.operands[2].reg = inst.operands[1].reg;
   14619   inst.operands[1].reg = tmp;
   14620   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
   14621 }
   14622 
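          /* Choose a "cmode" encoding for a Neon logic-immediate instruction
             (VBIC / VORR and the VAND / VORN pseudo-instructions handled by
             do_neon_logic below).  On success, the cmode value is returned and
             the byte to encode is stored in *IMMBITS; otherwise FAIL is
             returned.  For instance, with SIZE == 32 an immediate of 0xab00 is
             representable: the result is cmode 0x3 with *IMMBITS set to 0xab.  */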
   14623 static int
   14624 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
   14625 {
   14626   /* Handle .I8 pseudo-instructions.  */
   14627   if (size == 8)
   14628     {
   14629       /* Unfortunately, this will make everything apart from zero out-of-range.
    14630 	 FIXME: is this the intended semantics?  There doesn't seem much point in
    14631 	 accepting .I8 if so.  */
   14632       immediate |= immediate << 8;
   14633       size = 16;
   14634     }
   14635 
   14636   if (size >= 32)
   14637     {
   14638       if (immediate == (immediate & 0x000000ff))
   14639 	{
   14640 	  *immbits = immediate;
   14641 	  return 0x1;
   14642 	}
   14643       else if (immediate == (immediate & 0x0000ff00))
   14644 	{
   14645 	  *immbits = immediate >> 8;
   14646 	  return 0x3;
   14647 	}
   14648       else if (immediate == (immediate & 0x00ff0000))
   14649 	{
   14650 	  *immbits = immediate >> 16;
   14651 	  return 0x5;
   14652 	}
   14653       else if (immediate == (immediate & 0xff000000))
   14654 	{
   14655 	  *immbits = immediate >> 24;
   14656 	  return 0x7;
   14657 	}
   14658       if ((immediate & 0xffff) != (immediate >> 16))
   14659 	goto bad_immediate;
   14660       immediate &= 0xffff;
   14661     }
   14662 
   14663   if (immediate == (immediate & 0x000000ff))
   14664     {
   14665       *immbits = immediate;
   14666       return 0x9;
   14667     }
   14668   else if (immediate == (immediate & 0x0000ff00))
   14669     {
   14670       *immbits = immediate >> 8;
   14671       return 0xb;
   14672     }
   14673 
   14674   bad_immediate:
   14675   first_error (_("immediate value out of range"));
   14676   return FAIL;
   14677 }
   14678 
   14679 static void
   14680 do_neon_logic (void)
   14681 {
   14682   if (inst.operands[2].present && inst.operands[2].isreg)
   14683     {
   14684       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14685       neon_check_type (3, rs, N_IGNORE_TYPE);
   14686       /* U bit and size field were set as part of the bitmask.  */
   14687       NEON_ENCODE (INTEGER, inst);
   14688       neon_three_same (neon_quad (rs), 0, -1);
   14689     }
   14690   else
   14691     {
   14692       const int three_ops_form = (inst.operands[2].present
   14693 				  && !inst.operands[2].isreg);
   14694       const int immoperand = (three_ops_form ? 2 : 1);
   14695       enum neon_shape rs = (three_ops_form
   14696 			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
   14697 			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
   14698       struct neon_type_el et = neon_check_type (2, rs,
   14699 	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
   14700       enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
   14701       unsigned immbits;
   14702       int cmode;
   14703 
   14704       if (et.type == NT_invtype)
   14705 	return;
   14706 
   14707       if (three_ops_form)
   14708 	constraint (inst.operands[0].reg != inst.operands[1].reg,
   14709 		    _("first and second operands shall be the same register"));
   14710 
   14711       NEON_ENCODE (IMMED, inst);
   14712 
   14713       immbits = inst.operands[immoperand].imm;
   14714       if (et.size == 64)
   14715 	{
   14716 	  /* .i64 is a pseudo-op, so the immediate must be a repeating
   14717 	     pattern.  */
   14718 	  if (immbits != (inst.operands[immoperand].regisimm ?
   14719 			  inst.operands[immoperand].reg : 0))
   14720 	    {
   14721 	      /* Set immbits to an invalid constant.  */
   14722 	      immbits = 0xdeadbeef;
   14723 	    }
   14724 	}
   14725 
   14726       switch (opcode)
   14727 	{
   14728 	case N_MNEM_vbic:
   14729 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14730 	  break;
   14731 
   14732 	case N_MNEM_vorr:
   14733 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14734 	  break;
   14735 
   14736 	case N_MNEM_vand:
   14737 	  /* Pseudo-instruction for VBIC.  */
   14738 	  neon_invert_size (&immbits, 0, et.size);
   14739 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14740 	  break;
   14741 
   14742 	case N_MNEM_vorn:
   14743 	  /* Pseudo-instruction for VORR.  */
   14744 	  neon_invert_size (&immbits, 0, et.size);
   14745 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
   14746 	  break;
   14747 
   14748 	default:
   14749 	  abort ();
   14750 	}
   14751 
   14752       if (cmode == FAIL)
   14753 	return;
   14754 
   14755       inst.instruction |= neon_quad (rs) << 6;
   14756       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14757       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14758       inst.instruction |= cmode << 8;
   14759       neon_write_immbits (immbits);
   14760 
   14761       neon_dp_fixup (&inst);
   14762     }
   14763 }
   14764 
   14765 static void
   14766 do_neon_bitfield (void)
   14767 {
   14768   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14769   neon_check_type (3, rs, N_IGNORE_TYPE);
   14770   neon_three_same (neon_quad (rs), 0, -1);
   14771 }
   14772 
   14773 static void
   14774 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
   14775 		  unsigned destbits)
   14776 {
   14777   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   14778   struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
   14779 					    types | N_KEY);
   14780   if (et.type == NT_float)
   14781     {
   14782       NEON_ENCODE (FLOAT, inst);
   14783       neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
   14784     }
   14785   else
   14786     {
   14787       NEON_ENCODE (INTEGER, inst);
   14788       neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
   14789     }
   14790 }
   14791 
   14792 static void
   14793 do_neon_dyadic_if_su (void)
   14794 {
   14795   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
   14796 }
   14797 
   14798 static void
   14799 do_neon_dyadic_if_su_d (void)
   14800 {
    14801   /* This version only allows D registers, but that constraint is enforced
    14802      during operand parsing so we don't need to do anything extra here.  */
   14803   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
   14804 }
   14805 
   14806 static void
   14807 do_neon_dyadic_if_i_d (void)
   14808 {
   14809   /* The "untyped" case can't happen. Do this to stop the "U" bit being
   14810      affected if we specify unsigned args.  */
   14811   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   14812 }
   14813 
   14814 enum vfp_or_neon_is_neon_bits
   14815 {
   14816   NEON_CHECK_CC = 1,
   14817   NEON_CHECK_ARCH = 2,
   14818   NEON_CHECK_ARCH8 = 4
   14819 };
   14820 
    14821 /* Call this function for an instruction which may have belonged to either the
    14822    VFP or Neon instruction sets, but turned out to be a Neon instruction (due
    14823    to the operand types involved, etc.).  We have to check and/or fix up a couple of
   14824    things:
   14825 
   14826      - Make sure the user hasn't attempted to make a Neon instruction
   14827        conditional.
   14828      - Alter the value in the condition code field if necessary.
   14829      - Make sure that the arch supports Neon instructions.
   14830 
   14831    Which of these operations take place depends on bits from enum
   14832    vfp_or_neon_is_neon_bits.
   14833 
   14834    WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   14835    current instruction's condition is COND_ALWAYS, the condition field is
   14836    changed to inst.uncond_value. This is necessary because instructions shared
   14837    between VFP and Neon may be conditional for the VFP variants only, and the
   14838    unconditional Neon version must have, e.g., 0xF in the condition field.  */
   14839 
   14840 static int
   14841 vfp_or_neon_is_neon (unsigned check)
   14842 {
   14843   /* Conditions are always legal in Thumb mode (IT blocks).  */
   14844   if (!thumb_mode && (check & NEON_CHECK_CC))
   14845     {
   14846       if (inst.cond != COND_ALWAYS)
   14847 	{
   14848 	  first_error (_(BAD_COND));
   14849 	  return FAIL;
   14850 	}
   14851       if (inst.uncond_value != -1)
   14852 	inst.instruction |= inst.uncond_value << 28;
   14853     }
   14854 
   14855   if ((check & NEON_CHECK_ARCH)
   14856       && !mark_feature_used (&fpu_neon_ext_v1))
   14857     {
   14858       first_error (_(BAD_FPU));
   14859       return FAIL;
   14860     }
   14861 
   14862   if ((check & NEON_CHECK_ARCH8)
   14863       && !mark_feature_used (&fpu_neon_ext_armv8))
   14864     {
   14865       first_error (_(BAD_FPU));
   14866       return FAIL;
   14867     }
   14868 
   14869   return SUCCESS;
   14870 }
   14871 
   14872 static void
   14873 do_neon_addsub_if_i (void)
   14874 {
   14875   if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
   14876     return;
   14877 
   14878   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   14879     return;
   14880 
   14881   /* The "untyped" case can't happen. Do this to stop the "U" bit being
   14882      affected if we specify unsigned args.  */
   14883   neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
   14884 }
   14885 
   14886 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   14887    result to be:
   14888      V<op> A,B     (A is operand 0, B is operand 2)
   14889    to mean:
   14890      V<op> A,B,A
   14891    not:
   14892      V<op> A,B,B
   14893    so handle that case specially.  */
   14894 
   14895 static void
   14896 neon_exchange_operands (void)
   14897 {
   14898   if (inst.operands[1].present)
   14899     {
   14900       void *scratch = xmalloc (sizeof (inst.operands[0]));
   14901 
   14902       /* Swap operands[1] and operands[2].  */
   14903       memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
   14904       inst.operands[1] = inst.operands[2];
   14905       memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
   14906       free (scratch);
   14907     }
   14908   else
   14909     {
   14910       inst.operands[1] = inst.operands[2];
   14911       inst.operands[2] = inst.operands[0];
   14912     }
   14913 }
   14914 
   14915 static void
   14916 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
   14917 {
   14918   if (inst.operands[2].isreg)
   14919     {
   14920       if (invert)
   14921 	neon_exchange_operands ();
   14922       neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
   14923     }
   14924   else
   14925     {
   14926       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   14927       struct neon_type_el et = neon_check_type (2, rs,
   14928 	N_EQK | N_SIZ, immtypes | N_KEY);
   14929 
   14930       NEON_ENCODE (IMMED, inst);
   14931       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   14932       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   14933       inst.instruction |= LOW4 (inst.operands[1].reg);
   14934       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   14935       inst.instruction |= neon_quad (rs) << 6;
   14936       inst.instruction |= (et.type == NT_float) << 10;
   14937       inst.instruction |= neon_logbits (et.size) << 18;
   14938 
   14939       neon_dp_fixup (&inst);
   14940     }
   14941 }
   14942 
   14943 static void
   14944 do_neon_cmp (void)
   14945 {
   14946   neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
   14947 }
   14948 
   14949 static void
   14950 do_neon_cmp_inv (void)
   14951 {
   14952   neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
   14953 }
   14954 
   14955 static void
   14956 do_neon_ceq (void)
   14957 {
   14958   neon_compare (N_IF_32, N_IF_32, FALSE);
   14959 }
   14960 
   14961 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
   14962    scalars, which are encoded in 5 bits, M : Rm.
   14963    For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   14964    M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   14965    index in M.  */
   14966 
   14967 static unsigned
   14968 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
   14969 {
   14970   unsigned regno = NEON_SCALAR_REG (scalar);
   14971   unsigned elno = NEON_SCALAR_INDEX (scalar);
   14972 
   14973   switch (elsize)
   14974     {
   14975     case 16:
   14976       if (regno > 7 || elno > 3)
   14977 	goto bad_scalar;
   14978       return regno | (elno << 3);
   14979 
   14980     case 32:
   14981       if (regno > 15 || elno > 1)
   14982 	goto bad_scalar;
   14983       return regno | (elno << 4);
   14984 
   14985     default:
   14986     bad_scalar:
   14987       first_error (_("scalar out of range for multiply instruction"));
   14988     }
   14989 
   14990   return 0;
   14991 }
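
/* Illustrative sketch only -- not part of the original source and not used by
   the assembler.  A worked example of the M:Rm packing that
   neon_scalar_for_mul performs; the register numbers and lane indices below
   are hypothetical.  */
#if 0
static void
neon_scalar_packing_example (void)
{
  /* 16-bit scalar D5[2]: register in Rm[2:0], lane index in M:Rm[3],
     so the packed value is 5 | (2 << 3) = 0x15, i.e. M:Rm = 0b10101.  */
  unsigned packed16 = 5 | (2 << 3);

  /* 32-bit scalar D9[1]: register in Rm[3:0], lane index in M,
     so the packed value is 9 | (1 << 4) = 0x19, i.e. M:Rm = 0b11001.  */
  unsigned packed32 = 9 | (1 << 4);

  (void) packed16;
  (void) packed32;
}
#endif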
   14992 
   14993 /* Encode multiply / multiply-accumulate scalar instructions.  */
   14994 
   14995 static void
   14996 neon_mul_mac (struct neon_type_el et, int ubit)
   14997 {
   14998   unsigned scalar;
   14999 
   15000   /* Give a more helpful error message if we have an invalid type.  */
   15001   if (et.type == NT_invtype)
   15002     return;
   15003 
   15004   scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
   15005   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15006   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15007   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15008   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15009   inst.instruction |= LOW4 (scalar);
   15010   inst.instruction |= HI1 (scalar) << 5;
   15011   inst.instruction |= (et.type == NT_float) << 8;
   15012   inst.instruction |= neon_logbits (et.size) << 20;
   15013   inst.instruction |= (ubit != 0) << 24;
   15014 
   15015   neon_dp_fixup (&inst);
   15016 }
   15017 
   15018 static void
   15019 do_neon_mac_maybe_scalar (void)
   15020 {
   15021   if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
   15022     return;
   15023 
   15024   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15025     return;
   15026 
   15027   if (inst.operands[2].isscalar)
   15028     {
   15029       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   15030       struct neon_type_el et = neon_check_type (3, rs,
   15031 	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
   15032       NEON_ENCODE (SCALAR, inst);
   15033       neon_mul_mac (et, neon_quad (rs));
   15034     }
   15035   else
   15036     {
   15037       /* The "untyped" case can't happen.  Do this to stop the "U" bit being
   15038 	 affected if we specify unsigned args.  */
   15039       neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   15040     }
   15041 }
   15042 
   15043 static void
   15044 do_neon_fmac (void)
   15045 {
   15046   if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
   15047     return;
   15048 
   15049   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15050     return;
   15051 
   15052   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
   15053 }
   15054 
   15055 static void
   15056 do_neon_tst (void)
   15057 {
   15058   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15059   struct neon_type_el et = neon_check_type (3, rs,
   15060     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
   15061   neon_three_same (neon_quad (rs), 0, et.size);
   15062 }
   15063 
   15064 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
   15065    same types as the MAC equivalents. The polynomial type for this instruction
   15066    is encoded the same as the integer type.  */
   15067 
   15068 static void
   15069 do_neon_mul (void)
   15070 {
   15071   if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
   15072     return;
   15073 
   15074   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15075     return;
   15076 
   15077   if (inst.operands[2].isscalar)
   15078     do_neon_mac_maybe_scalar ();
   15079   else
   15080     neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
   15081 }
   15082 
   15083 static void
   15084 do_neon_qdmulh (void)
   15085 {
   15086   if (inst.operands[2].isscalar)
   15087     {
   15088       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   15089       struct neon_type_el et = neon_check_type (3, rs,
   15090 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15091       NEON_ENCODE (SCALAR, inst);
   15092       neon_mul_mac (et, neon_quad (rs));
   15093     }
   15094   else
   15095     {
   15096       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15097       struct neon_type_el et = neon_check_type (3, rs,
   15098 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15099       NEON_ENCODE (INTEGER, inst);
    15100 	      /* The U bit (rounding) comes from the bit mask.  */
   15101       neon_three_same (neon_quad (rs), 0, et.size);
   15102     }
   15103 }
   15104 
   15105 static void
   15106 do_neon_qrdmlah (void)
   15107 {
   15108   /* Check we're on the correct architecture.  */
   15109   if (!mark_feature_used (&fpu_neon_ext_armv8))
   15110     inst.error =
   15111       _("instruction form not available on this architecture.");
   15112   else if (!mark_feature_used (&fpu_neon_ext_v8_1))
   15113     {
   15114       as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
   15115       record_feature_use (&fpu_neon_ext_v8_1);
   15116     }
   15117 
   15118   if (inst.operands[2].isscalar)
   15119     {
   15120       enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
   15121       struct neon_type_el et = neon_check_type (3, rs,
   15122 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15123       NEON_ENCODE (SCALAR, inst);
   15124       neon_mul_mac (et, neon_quad (rs));
   15125     }
   15126   else
   15127     {
   15128       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15129       struct neon_type_el et = neon_check_type (3, rs,
   15130 	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
   15131       NEON_ENCODE (INTEGER, inst);
    15132 	      /* The U bit (rounding) comes from the bit mask.  */
   15133       neon_three_same (neon_quad (rs), 0, et.size);
   15134     }
   15135 }
   15136 
   15137 static void
   15138 do_neon_fcmp_absolute (void)
   15139 {
   15140   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15141   struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
   15142 					    N_F_16_32 | N_KEY);
    15143 	  /* The size field comes from the bit mask.  */
   15144   neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
   15145 }
   15146 
   15147 static void
   15148 do_neon_fcmp_absolute_inv (void)
   15149 {
   15150   neon_exchange_operands ();
   15151   do_neon_fcmp_absolute ();
   15152 }
   15153 
   15154 static void
   15155 do_neon_step (void)
   15156 {
   15157   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   15158   struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
   15159 					    N_F_16_32 | N_KEY);
   15160   neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
   15161 }
   15162 
   15163 static void
   15164 do_neon_abs_neg (void)
   15165 {
   15166   enum neon_shape rs;
   15167   struct neon_type_el et;
   15168 
   15169   if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
   15170     return;
   15171 
   15172   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15173     return;
   15174 
   15175   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15176   et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
   15177 
   15178   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15179   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15180   inst.instruction |= LOW4 (inst.operands[1].reg);
   15181   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15182   inst.instruction |= neon_quad (rs) << 6;
   15183   inst.instruction |= (et.type == NT_float) << 10;
   15184   inst.instruction |= neon_logbits (et.size) << 18;
   15185 
   15186   neon_dp_fixup (&inst);
   15187 }
   15188 
   15189 static void
   15190 do_neon_sli (void)
   15191 {
   15192   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15193   struct neon_type_el et = neon_check_type (2, rs,
   15194     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   15195   int imm = inst.operands[2].imm;
   15196   constraint (imm < 0 || (unsigned)imm >= et.size,
   15197 	      _("immediate out of range for insert"));
   15198   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   15199 }
   15200 
   15201 static void
   15202 do_neon_sri (void)
   15203 {
   15204   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15205   struct neon_type_el et = neon_check_type (2, rs,
   15206     N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   15207   int imm = inst.operands[2].imm;
   15208   constraint (imm < 1 || (unsigned)imm > et.size,
   15209 	      _("immediate out of range for insert"));
   15210   neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
   15211 }
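
/* Illustrative sketch only -- not part of the original source.  It contrasts
   the immediate encodings used by do_neon_sli and do_neon_sri above; the
   shift amount and element size are hypothetical.  */
#if 0
static void
neon_sli_sri_imm_example (void)
{
  /* "vsli.32 d0, d1, #5": constraint is 0 <= 5 < 32, and the immediate is
     encoded directly as 5.  */
  unsigned sli_enc = 5;

  /* "vsri.32 d0, d1, #5": constraint is 1 <= 5 <= 32, and the immediate is
     encoded as size - shift = 32 - 5 = 27.  */
  unsigned sri_enc = 32 - 5;

  (void) sli_enc;
  (void) sri_enc;
}
#endif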
   15212 
   15213 static void
   15214 do_neon_qshlu_imm (void)
   15215 {
   15216   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   15217   struct neon_type_el et = neon_check_type (2, rs,
   15218     N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
   15219   int imm = inst.operands[2].imm;
   15220   constraint (imm < 0 || (unsigned)imm >= et.size,
   15221 	      _("immediate out of range for shift"));
   15222   /* Only encodes the 'U present' variant of the instruction.
   15223      In this case, signed types have OP (bit 8) set to 0.
   15224      Unsigned types have OP set to 1.  */
   15225   inst.instruction |= (et.type == NT_unsigned) << 8;
   15226   /* The rest of the bits are the same as other immediate shifts.  */
   15227   neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
   15228 }
   15229 
   15230 static void
   15231 do_neon_qmovn (void)
   15232 {
   15233   struct neon_type_el et = neon_check_type (2, NS_DQ,
   15234     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
   15235   /* Saturating move where operands can be signed or unsigned, and the
   15236      destination has the same signedness.  */
   15237   NEON_ENCODE (INTEGER, inst);
   15238   if (et.type == NT_unsigned)
   15239     inst.instruction |= 0xc0;
   15240   else
   15241     inst.instruction |= 0x80;
   15242   neon_two_same (0, 1, et.size / 2);
   15243 }
   15244 
   15245 static void
   15246 do_neon_qmovun (void)
   15247 {
   15248   struct neon_type_el et = neon_check_type (2, NS_DQ,
   15249     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
   15250   /* Saturating move with unsigned results. Operands must be signed.  */
   15251   NEON_ENCODE (INTEGER, inst);
   15252   neon_two_same (0, 1, et.size / 2);
   15253 }
   15254 
   15255 static void
   15256 do_neon_rshift_sat_narrow (void)
   15257 {
   15258   /* FIXME: Types for narrowing. If operands are signed, results can be signed
   15259      or unsigned. If operands are unsigned, results must also be unsigned.  */
   15260   struct neon_type_el et = neon_check_type (2, NS_DQI,
   15261     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
   15262   int imm = inst.operands[2].imm;
   15263   /* This gets the bounds check, size encoding and immediate bits calculation
   15264      right.  */
   15265   et.size /= 2;
   15266 
   15267   /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
   15268      VQMOVN.I<size> <Dd>, <Qm>.  */
   15269   if (imm == 0)
   15270     {
   15271       inst.operands[2].present = 0;
   15272       inst.instruction = N_MNEM_vqmovn;
   15273       do_neon_qmovn ();
   15274       return;
   15275     }
   15276 
   15277   constraint (imm < 1 || (unsigned)imm > et.size,
   15278 	      _("immediate out of range"));
   15279   neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
   15280 }
   15281 
   15282 static void
   15283 do_neon_rshift_sat_narrow_u (void)
   15284 {
   15285   /* FIXME: Types for narrowing. If operands are signed, results can be signed
   15286      or unsigned. If operands are unsigned, results must also be unsigned.  */
   15287   struct neon_type_el et = neon_check_type (2, NS_DQI,
   15288     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
   15289   int imm = inst.operands[2].imm;
   15290   /* This gets the bounds check, size encoding and immediate bits calculation
   15291      right.  */
   15292   et.size /= 2;
   15293 
   15294   /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
   15295      VQMOVUN.I<size> <Dd>, <Qm>.  */
   15296   if (imm == 0)
   15297     {
   15298       inst.operands[2].present = 0;
   15299       inst.instruction = N_MNEM_vqmovun;
   15300       do_neon_qmovun ();
   15301       return;
   15302     }
   15303 
   15304   constraint (imm < 1 || (unsigned)imm > et.size,
   15305 	      _("immediate out of range"));
   15306   /* FIXME: The manual is kind of unclear about what value U should have in
   15307      VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
   15308      must be 1.  */
   15309   neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
   15310 }
   15311 
   15312 static void
   15313 do_neon_movn (void)
   15314 {
   15315   struct neon_type_el et = neon_check_type (2, NS_DQ,
   15316     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
   15317   NEON_ENCODE (INTEGER, inst);
   15318   neon_two_same (0, 1, et.size / 2);
   15319 }
   15320 
   15321 static void
   15322 do_neon_rshift_narrow (void)
   15323 {
   15324   struct neon_type_el et = neon_check_type (2, NS_DQI,
   15325     N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
   15326   int imm = inst.operands[2].imm;
   15327   /* This gets the bounds check, size encoding and immediate bits calculation
   15328      right.  */
   15329   et.size /= 2;
   15330 
    15331 	  /* If the immediate is zero then this is a pseudo-instruction for
    15332 	     VMOVN.I<size> <Dd>, <Qm>.  */
   15333   if (imm == 0)
   15334     {
   15335       inst.operands[2].present = 0;
   15336       inst.instruction = N_MNEM_vmovn;
   15337       do_neon_movn ();
   15338       return;
   15339     }
   15340 
   15341   constraint (imm < 1 || (unsigned)imm > et.size,
   15342 	      _("immediate out of range for narrowing operation"));
   15343   neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
   15344 }
   15345 
   15346 static void
   15347 do_neon_shll (void)
   15348 {
   15349   /* FIXME: Type checking when lengthening.  */
   15350   struct neon_type_el et = neon_check_type (2, NS_QDI,
   15351     N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
   15352   unsigned imm = inst.operands[2].imm;
   15353 
   15354   if (imm == et.size)
   15355     {
   15356       /* Maximum shift variant.  */
   15357       NEON_ENCODE (INTEGER, inst);
   15358       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15359       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15360       inst.instruction |= LOW4 (inst.operands[1].reg);
   15361       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15362       inst.instruction |= neon_logbits (et.size) << 18;
   15363 
   15364       neon_dp_fixup (&inst);
   15365     }
   15366   else
   15367     {
   15368       /* A more-specific type check for non-max versions.  */
   15369       et = neon_check_type (2, NS_QDI,
   15370 	N_EQK | N_DBL, N_SU_32 | N_KEY);
   15371       NEON_ENCODE (IMMED, inst);
   15372       neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
   15373     }
   15374 }
   15375 
   15376 /* Check the various types for the VCVT instruction, and return which version
   15377    the current instruction is.  */
   15378 
   15379 #define CVT_FLAVOUR_VAR							      \
   15380   CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
   15381   CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
   15382   CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
   15383   CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
   15384   /* Half-precision conversions.  */					      \
   15385   CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
   15386   CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
   15387   CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
   15388   CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
   15389   CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
   15390   CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
   15391   /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
   15392      Compared with single/double precision variants, only the co-processor    \
   15393      field is different, so the encoding flow is reused here.  */	      \
   15394   CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
   15395   CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
   15396   CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
   15397   CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
   15398   /* VFP instructions.  */						      \
   15399   CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
   15400   CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
   15401   CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
   15402   CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
   15403   CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
   15404   CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
   15405   /* VFP instructions with bitshift.  */				      \
   15406   CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
   15407   CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
   15408   CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
   15409   CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
   15410   CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
   15411   CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
   15412   CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
   15413   CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
   15414 
   15415 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
   15416   neon_cvt_flavour_##C,
   15417 
   15418 /* The different types of conversions we can do.  */
   15419 enum neon_cvt_flavour
   15420 {
   15421   CVT_FLAVOUR_VAR
   15422   neon_cvt_flavour_invalid,
   15423   neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
   15424 };
   15425 
   15426 #undef CVT_VAR
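
/* Illustrative expansion only -- not part of the original source, and kept
   under #if 0 so it is never compiled.  With the CVT_VAR definition above,
   the first rows of CVT_FLAVOUR_VAR expand to the enumerators sketched
   below; the same table is re-expanded with different CVT_VAR definitions
   later on to build matching arrays of VFP opcode names.  */
#if 0
enum neon_cvt_flavour_sketch
{
  neon_cvt_flavour_s32_f32,
  neon_cvt_flavour_u32_f32,
  neon_cvt_flavour_f32_s32,
  neon_cvt_flavour_f32_u32
  /* ... one enumerator per CVT_VAR row, then neon_cvt_flavour_invalid.  */
};
#endif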
   15427 
   15428 static enum neon_cvt_flavour
   15429 get_neon_cvt_flavour (enum neon_shape rs)
   15430 {
   15431 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
   15432   et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
   15433   if (et.type != NT_invtype)				\
   15434     {							\
   15435       inst.error = NULL;				\
   15436       return (neon_cvt_flavour_##C);			\
   15437     }
   15438 
   15439   struct neon_type_el et;
   15440   unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
   15441 			|| rs == NS_FF) ? N_VFP : 0;
   15442   /* The instruction versions which take an immediate take one register
   15443      argument, which is extended to the width of the full register. Thus the
   15444      "source" and "destination" registers must have the same width.  Hack that
    15445      here by making the size equal to that of the key (here, the wider) operand.  */
   15446   unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
   15447 
   15448   CVT_FLAVOUR_VAR;
   15449 
   15450   return neon_cvt_flavour_invalid;
   15451 #undef CVT_VAR
   15452 }
   15453 
   15454 enum neon_cvt_mode
   15455 {
   15456   neon_cvt_mode_a,
   15457   neon_cvt_mode_n,
   15458   neon_cvt_mode_p,
   15459   neon_cvt_mode_m,
   15460   neon_cvt_mode_z,
   15461   neon_cvt_mode_x,
   15462   neon_cvt_mode_r
   15463 };
   15464 
   15465 /* Neon-syntax VFP conversions.  */
   15466 
   15467 static void
   15468 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
   15469 {
   15470   const char *opname = 0;
   15471 
   15472   if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
   15473       || rs == NS_FHI || rs == NS_HFI)
   15474     {
   15475       /* Conversions with immediate bitshift.  */
   15476       const char *enc[] =
   15477 	{
   15478 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
   15479 	  CVT_FLAVOUR_VAR
   15480 	  NULL
   15481 #undef CVT_VAR
   15482 	};
   15483 
   15484       if (flavour < (int) ARRAY_SIZE (enc))
   15485 	{
   15486 	  opname = enc[flavour];
   15487 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
   15488 		      _("operands 0 and 1 must be the same register"));
   15489 	  inst.operands[1] = inst.operands[2];
   15490 	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
   15491 	}
   15492     }
   15493   else
   15494     {
   15495       /* Conversions without bitshift.  */
   15496       const char *enc[] =
   15497 	{
   15498 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
   15499 	  CVT_FLAVOUR_VAR
   15500 	  NULL
   15501 #undef CVT_VAR
   15502 	};
   15503 
   15504       if (flavour < (int) ARRAY_SIZE (enc))
   15505 	opname = enc[flavour];
   15506     }
   15507 
   15508   if (opname)
   15509     do_vfp_nsyn_opcode (opname);
   15510 
   15511   /* ARMv8.2 fp16 VCVT instruction.  */
   15512   if (flavour == neon_cvt_flavour_s32_f16
   15513       || flavour == neon_cvt_flavour_u32_f16
   15514       || flavour == neon_cvt_flavour_f16_u32
   15515       || flavour == neon_cvt_flavour_f16_s32)
   15516     do_scalar_fp16_v82_encode ();
   15517 }
   15518 
   15519 static void
   15520 do_vfp_nsyn_cvtz (void)
   15521 {
   15522   enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
   15523   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
   15524   const char *enc[] =
   15525     {
   15526 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
   15527       CVT_FLAVOUR_VAR
   15528       NULL
   15529 #undef CVT_VAR
   15530     };
   15531 
   15532   if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
   15533     do_vfp_nsyn_opcode (enc[flavour]);
   15534 }
   15535 
   15536 static void
   15537 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
   15538 		      enum neon_cvt_mode mode)
   15539 {
   15540   int sz, op;
   15541   int rm;
   15542 
   15543   /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
   15544      D register operands.  */
   15545   if (flavour == neon_cvt_flavour_s32_f64
   15546       || flavour == neon_cvt_flavour_u32_f64)
   15547     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   15548 		_(BAD_FPU));
   15549 
   15550   if (flavour == neon_cvt_flavour_s32_f16
   15551       || flavour == neon_cvt_flavour_u32_f16)
   15552     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
   15553 		_(BAD_FP16));
   15554 
   15555   set_it_insn_type (OUTSIDE_IT_INSN);
   15556 
   15557   switch (flavour)
   15558     {
   15559     case neon_cvt_flavour_s32_f64:
   15560       sz = 1;
   15561       op = 1;
   15562       break;
   15563     case neon_cvt_flavour_s32_f32:
   15564       sz = 0;
   15565       op = 1;
   15566       break;
   15567     case neon_cvt_flavour_s32_f16:
   15568       sz = 0;
   15569       op = 1;
   15570       break;
   15571     case neon_cvt_flavour_u32_f64:
   15572       sz = 1;
   15573       op = 0;
   15574       break;
   15575     case neon_cvt_flavour_u32_f32:
   15576       sz = 0;
   15577       op = 0;
   15578       break;
   15579     case neon_cvt_flavour_u32_f16:
   15580       sz = 0;
   15581       op = 0;
   15582       break;
   15583     default:
   15584       first_error (_("invalid instruction shape"));
   15585       return;
   15586     }
   15587 
   15588   switch (mode)
   15589     {
   15590     case neon_cvt_mode_a: rm = 0; break;
   15591     case neon_cvt_mode_n: rm = 1; break;
   15592     case neon_cvt_mode_p: rm = 2; break;
   15593     case neon_cvt_mode_m: rm = 3; break;
   15594     default: first_error (_("invalid rounding mode")); return;
   15595     }
   15596 
   15597   NEON_ENCODE (FPV8, inst);
   15598   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
   15599   encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
   15600   inst.instruction |= sz << 8;
   15601 
   15602   /* ARMv8.2 fp16 VCVT instruction.  */
   15603   if (flavour == neon_cvt_flavour_s32_f16
    15604 	      || flavour == neon_cvt_flavour_u32_f16)
   15605     do_scalar_fp16_v82_encode ();
   15606   inst.instruction |= op << 7;
   15607   inst.instruction |= rm << 16;
   15608   inst.instruction |= 0xf0000000;
   15609   inst.is_neon = TRUE;
   15610 }
   15611 
   15612 static void
   15613 do_neon_cvt_1 (enum neon_cvt_mode mode)
   15614 {
   15615   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
   15616 					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
   15617 					  NS_FH, NS_HF, NS_FHI, NS_HFI,
   15618 					  NS_NULL);
   15619   enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
   15620 
   15621   if (flavour == neon_cvt_flavour_invalid)
   15622     return;
   15623 
   15624   /* PR11109: Handle round-to-zero for VCVT conversions.  */
   15625   if (mode == neon_cvt_mode_z
   15626       && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
   15627       && (flavour == neon_cvt_flavour_s16_f16
   15628 	  || flavour == neon_cvt_flavour_u16_f16
   15629 	  || flavour == neon_cvt_flavour_s32_f32
   15630 	  || flavour == neon_cvt_flavour_u32_f32
   15631 	  || flavour == neon_cvt_flavour_s32_f64
   15632 	  || flavour == neon_cvt_flavour_u32_f64)
   15633       && (rs == NS_FD || rs == NS_FF))
   15634     {
   15635       do_vfp_nsyn_cvtz ();
   15636       return;
   15637     }
   15638 
   15639   /* ARMv8.2 fp16 VCVT conversions.  */
   15640   if (mode == neon_cvt_mode_z
   15641       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
   15642       && (flavour == neon_cvt_flavour_s32_f16
   15643 	  || flavour == neon_cvt_flavour_u32_f16)
   15644       && (rs == NS_FH))
   15645     {
   15646       do_vfp_nsyn_cvtz ();
   15647       do_scalar_fp16_v82_encode ();
   15648       return;
   15649     }
   15650 
   15651   /* VFP rather than Neon conversions.  */
   15652   if (flavour >= neon_cvt_flavour_first_fp)
   15653     {
   15654       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
   15655 	do_vfp_nsyn_cvt (rs, flavour);
   15656       else
   15657 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
   15658 
   15659       return;
   15660     }
   15661 
   15662   switch (rs)
   15663     {
   15664     case NS_DDI:
   15665     case NS_QQI:
   15666       {
   15667 	unsigned immbits;
   15668 	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
   15669 			     0x0000100, 0x1000100, 0x0, 0x1000000};
   15670 
   15671 	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15672 	  return;
   15673 
   15674 	/* Fixed-point conversion with #0 immediate is encoded as an
   15675 	   integer conversion.  */
   15676 	if (inst.operands[2].present && inst.operands[2].imm == 0)
   15677 	  goto int_encode;
   15678 	NEON_ENCODE (IMMED, inst);
   15679 	if (flavour != neon_cvt_flavour_invalid)
   15680 	  inst.instruction |= enctab[flavour];
   15681 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15682 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15683 	inst.instruction |= LOW4 (inst.operands[1].reg);
   15684 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15685 	inst.instruction |= neon_quad (rs) << 6;
   15686 	inst.instruction |= 1 << 21;
   15687 	if (flavour < neon_cvt_flavour_s16_f16)
   15688 	  {
   15689 	    inst.instruction |= 1 << 21;
   15690 	    immbits = 32 - inst.operands[2].imm;
   15691 	    inst.instruction |= immbits << 16;
   15692 	  }
   15693 	else
   15694 	  {
   15695 	    inst.instruction |= 3 << 20;
   15696 	    immbits = 16 - inst.operands[2].imm;
   15697 	    inst.instruction |= immbits << 16;
   15698 	    inst.instruction &= ~(1 << 9);
   15699 	  }
   15700 
   15701 	neon_dp_fixup (&inst);
   15702       }
   15703       break;
   15704 
   15705     case NS_DD:
   15706     case NS_QQ:
   15707       if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
   15708 	{
   15709 	  NEON_ENCODE (FLOAT, inst);
   15710 	  set_it_insn_type (OUTSIDE_IT_INSN);
   15711 
   15712 	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   15713 	    return;
   15714 
   15715 	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15716 	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15717 	  inst.instruction |= LOW4 (inst.operands[1].reg);
   15718 	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15719 	  inst.instruction |= neon_quad (rs) << 6;
   15720 	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
   15721 			       || flavour == neon_cvt_flavour_u32_f32) << 7;
   15722 	  inst.instruction |= mode << 8;
   15723 	  if (flavour == neon_cvt_flavour_u16_f16
   15724 	      || flavour == neon_cvt_flavour_s16_f16)
   15725 	    /* Mask off the original size bits and reencode them.  */
   15726 	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
   15727 
   15728 	  if (thumb_mode)
   15729 	    inst.instruction |= 0xfc000000;
   15730 	  else
   15731 	    inst.instruction |= 0xf0000000;
   15732 	}
   15733       else
   15734 	{
   15735     int_encode:
   15736 	  {
   15737 	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
   15738 				  0x100, 0x180, 0x0, 0x080};
   15739 
   15740 	    NEON_ENCODE (INTEGER, inst);
   15741 
   15742 	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   15743 	      return;
   15744 
   15745 	    if (flavour != neon_cvt_flavour_invalid)
   15746 	      inst.instruction |= enctab[flavour];
   15747 
   15748 	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15749 	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15750 	    inst.instruction |= LOW4 (inst.operands[1].reg);
   15751 	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15752 	    inst.instruction |= neon_quad (rs) << 6;
   15753 	    if (flavour >= neon_cvt_flavour_s16_f16
   15754 		&& flavour <= neon_cvt_flavour_f16_u16)
   15755 	      /* Half precision.  */
   15756 	      inst.instruction |= 1 << 18;
   15757 	    else
   15758 	      inst.instruction |= 2 << 18;
   15759 
   15760 	    neon_dp_fixup (&inst);
   15761 	  }
   15762 	}
   15763       break;
   15764 
   15765     /* Half-precision conversions for Advanced SIMD -- neon.  */
   15766     case NS_QD:
   15767     case NS_DQ:
   15768 
   15769       if ((rs == NS_DQ)
   15770 	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
   15771 	  {
   15772 	    as_bad (_("operand size must match register width"));
   15773 	    break;
   15774 	  }
   15775 
   15776       if ((rs == NS_QD)
   15777 	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
   15778 	  {
   15779 	    as_bad (_("operand size must match register width"));
   15780 	    break;
   15781 	  }
   15782 
   15783       if (rs == NS_DQ)
   15784 	inst.instruction = 0x3b60600;
   15785       else
   15786 	inst.instruction = 0x3b60700;
   15787 
   15788       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15789       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15790       inst.instruction |= LOW4 (inst.operands[1].reg);
   15791       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15792       neon_dp_fixup (&inst);
   15793       break;
   15794 
   15795     default:
   15796       /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
   15797       if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
   15798 	do_vfp_nsyn_cvt (rs, flavour);
   15799       else
   15800 	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
   15801     }
   15802 }
   15803 
   15804 static void
   15805 do_neon_cvtr (void)
   15806 {
   15807   do_neon_cvt_1 (neon_cvt_mode_x);
   15808 }
   15809 
   15810 static void
   15811 do_neon_cvt (void)
   15812 {
   15813   do_neon_cvt_1 (neon_cvt_mode_z);
   15814 }
   15815 
   15816 static void
   15817 do_neon_cvta (void)
   15818 {
   15819   do_neon_cvt_1 (neon_cvt_mode_a);
   15820 }
   15821 
   15822 static void
   15823 do_neon_cvtn (void)
   15824 {
   15825   do_neon_cvt_1 (neon_cvt_mode_n);
   15826 }
   15827 
   15828 static void
   15829 do_neon_cvtp (void)
   15830 {
   15831   do_neon_cvt_1 (neon_cvt_mode_p);
   15832 }
   15833 
   15834 static void
   15835 do_neon_cvtm (void)
   15836 {
   15837   do_neon_cvt_1 (neon_cvt_mode_m);
   15838 }
   15839 
   15840 static void
   15841 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
   15842 {
   15843   if (is_double)
   15844     mark_feature_used (&fpu_vfp_ext_armv8);
   15845 
   15846   encode_arm_vfp_reg (inst.operands[0].reg,
   15847 		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
   15848   encode_arm_vfp_reg (inst.operands[1].reg,
   15849 		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
   15850   inst.instruction |= to ? 0x10000 : 0;
   15851   inst.instruction |= t ? 0x80 : 0;
   15852   inst.instruction |= is_double ? 0x100 : 0;
   15853   do_vfp_cond_or_thumb ();
   15854 }
   15855 
   15856 static void
   15857 do_neon_cvttb_1 (bfd_boolean t)
   15858 {
   15859   enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
   15860 					  NS_DF, NS_DH, NS_NULL);
   15861 
   15862   if (rs == NS_NULL)
   15863     return;
   15864   else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
   15865     {
   15866       inst.error = NULL;
   15867       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
   15868     }
   15869   else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
   15870     {
   15871       inst.error = NULL;
   15872       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
   15873     }
   15874   else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
   15875     {
   15876       /* The VCVTB and VCVTT instructions with D-register operands
    15877          don't work for SP-only targets.  */
   15878       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   15879 		  _(BAD_FPU));
   15880 
   15881       inst.error = NULL;
   15882       do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
   15883     }
   15884   else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
   15885     {
   15886       /* The VCVTB and VCVTT instructions with D-register operands
    15887          don't work for SP-only targets.  */
   15888       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   15889 		  _(BAD_FPU));
   15890 
   15891       inst.error = NULL;
   15892       do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
   15893     }
   15894   else
   15895     return;
   15896 }
   15897 
   15898 static void
   15899 do_neon_cvtb (void)
   15900 {
   15901   do_neon_cvttb_1 (FALSE);
   15902 }
   15903 
   15904 
   15905 static void
   15906 do_neon_cvtt (void)
   15907 {
   15908   do_neon_cvttb_1 (TRUE);
   15909 }
   15910 
   15911 static void
   15912 neon_move_immediate (void)
   15913 {
   15914   enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
   15915   struct neon_type_el et = neon_check_type (2, rs,
   15916     N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
   15917   unsigned immlo, immhi = 0, immbits;
   15918   int op, cmode, float_p;
   15919 
   15920   constraint (et.type == NT_invtype,
   15921 	      _("operand size must be specified for immediate VMOV"));
   15922 
   15923   /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
   15924   op = (inst.instruction & (1 << 5)) != 0;
   15925 
   15926   immlo = inst.operands[1].imm;
   15927   if (inst.operands[1].regisimm)
   15928     immhi = inst.operands[1].reg;
   15929 
   15930   constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
   15931 	      _("immediate has bits set outside the operand size"));
   15932 
   15933   float_p = inst.operands[1].immisfloat;
   15934 
   15935   if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
   15936 					et.size, et.type)) == FAIL)
   15937     {
   15938       /* Invert relevant bits only.  */
   15939       neon_invert_size (&immlo, &immhi, et.size);
   15940       /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
   15941 	 with one or the other; those cases are caught by
   15942 	 neon_cmode_for_move_imm.  */
   15943       op = !op;
   15944       if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
   15945 					    &op, et.size, et.type)) == FAIL)
   15946 	{
   15947 	  first_error (_("immediate out of range"));
   15948 	  return;
   15949 	}
   15950     }
   15951 
   15952   inst.instruction &= ~(1 << 5);
   15953   inst.instruction |= op << 5;
   15954 
   15955   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15956   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15957   inst.instruction |= neon_quad (rs) << 6;
   15958   inst.instruction |= cmode << 8;
   15959 
   15960   neon_write_immbits (immbits);
   15961 }
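
/* Illustrative note only -- not part of the original source.  A worked
   example of the VMOV/VMVN flip performed above; the concrete immediate is
   an assumption chosen for illustration.  0xffffff00 matches no VMOV.I32
   cmode, but its bitwise inverse does, so the immediate is inverted and OP
   flipped:

       vmov.i32 d0, #0xffffff00   is emitted as   vmvn.i32 d0, #0x000000ff  */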
   15962 
   15963 static void
   15964 do_neon_mvn (void)
   15965 {
   15966   if (inst.operands[1].isreg)
   15967     {
   15968       enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   15969 
   15970       NEON_ENCODE (INTEGER, inst);
   15971       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15972       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15973       inst.instruction |= LOW4 (inst.operands[1].reg);
   15974       inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   15975       inst.instruction |= neon_quad (rs) << 6;
   15976     }
   15977   else
   15978     {
   15979       NEON_ENCODE (IMMED, inst);
   15980       neon_move_immediate ();
   15981     }
   15982 
   15983   neon_dp_fixup (&inst);
   15984 }
   15985 
   15986 /* Encode instructions of form:
   15987 
   15988   |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   15989   |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
   15990 
   15991 static void
   15992 neon_mixed_length (struct neon_type_el et, unsigned size)
   15993 {
   15994   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   15995   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   15996   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   15997   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   15998   inst.instruction |= LOW4 (inst.operands[2].reg);
   15999   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16000   inst.instruction |= (et.type == NT_unsigned) << 24;
   16001   inst.instruction |= neon_logbits (size) << 20;
   16002 
   16003   neon_dp_fixup (&inst);
   16004 }
   16005 
   16006 static void
   16007 do_neon_dyadic_long (void)
   16008 {
   16009   /* FIXME: Type checking for lengthening op.  */
   16010   struct neon_type_el et = neon_check_type (3, NS_QDD,
   16011     N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
   16012   neon_mixed_length (et, et.size);
   16013 }
   16014 
   16015 static void
   16016 do_neon_abal (void)
   16017 {
   16018   struct neon_type_el et = neon_check_type (3, NS_QDD,
   16019     N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
   16020   neon_mixed_length (et, et.size);
   16021 }
   16022 
   16023 static void
   16024 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
   16025 {
   16026   if (inst.operands[2].isscalar)
   16027     {
   16028       struct neon_type_el et = neon_check_type (3, NS_QDS,
   16029 	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
   16030       NEON_ENCODE (SCALAR, inst);
   16031       neon_mul_mac (et, et.type == NT_unsigned);
   16032     }
   16033   else
   16034     {
   16035       struct neon_type_el et = neon_check_type (3, NS_QDD,
   16036 	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
   16037       NEON_ENCODE (INTEGER, inst);
   16038       neon_mixed_length (et, et.size);
   16039     }
   16040 }
   16041 
   16042 static void
   16043 do_neon_mac_maybe_scalar_long (void)
   16044 {
   16045   neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
   16046 }
   16047 
   16048 static void
   16049 do_neon_dyadic_wide (void)
   16050 {
   16051   struct neon_type_el et = neon_check_type (3, NS_QQD,
   16052     N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
   16053   neon_mixed_length (et, et.size);
   16054 }
   16055 
   16056 static void
   16057 do_neon_dyadic_narrow (void)
   16058 {
   16059   struct neon_type_el et = neon_check_type (3, NS_QDD,
   16060     N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
   16061   /* Operand sign is unimportant, and the U bit is part of the opcode,
   16062      so force the operand type to integer.  */
   16063   et.type = NT_integer;
   16064   neon_mixed_length (et, et.size / 2);
   16065 }
   16066 
   16067 static void
   16068 do_neon_mul_sat_scalar_long (void)
   16069 {
   16070   neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
   16071 }
   16072 
   16073 static void
   16074 do_neon_vmull (void)
   16075 {
   16076   if (inst.operands[2].isscalar)
   16077     do_neon_mac_maybe_scalar_long ();
   16078   else
   16079     {
   16080       struct neon_type_el et = neon_check_type (3, NS_QDD,
   16081 	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
   16082 
   16083       if (et.type == NT_poly)
   16084 	NEON_ENCODE (POLY, inst);
   16085       else
   16086 	NEON_ENCODE (INTEGER, inst);
   16087 
    16088       /* For polynomial encoding the U bit must be zero, and the size must
    16089 	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
    16090 	 non-obviously, as 0b10).  */
   16091       if (et.size == 64)
   16092 	{
   16093 	  /* Check we're on the correct architecture.  */
   16094 	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
   16095 	    inst.error =
   16096 	      _("Instruction form not available on this architecture.");
   16097 
   16098 	  et.size = 32;
   16099 	}
   16100 
   16101       neon_mixed_length (et, et.size);
   16102     }
   16103 }
   16104 
   16105 static void
   16106 do_neon_ext (void)
   16107 {
   16108   enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
   16109   struct neon_type_el et = neon_check_type (3, rs,
   16110     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
   16111   unsigned imm = (inst.operands[3].imm * et.size) / 8;
   16112 
   16113   constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
   16114 	      _("shift out of range"));
   16115   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16116   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16117   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16118   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16119   inst.instruction |= LOW4 (inst.operands[2].reg);
   16120   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16121   inst.instruction |= neon_quad (rs) << 6;
   16122   inst.instruction |= imm << 8;
   16123 
   16124   neon_dp_fixup (&inst);
   16125 }
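
/* Illustrative sketch only -- not part of the original source.  It shows how
   the VEXT element index above is scaled to a byte offset; the operand
   values are hypothetical.  */
#if 0
static void
neon_ext_imm_example (void)
{
  /* "vext.32 d0, d1, d2, #1": element index 1 of 32-bit elements is byte
     offset (1 * 32) / 8 = 4, within the 8-byte D-register limit.  */
  unsigned imm_d = (1 * 32) / 8;

  /* "vext.8 q0, q1, q2, #10": byte offset (10 * 8) / 8 = 10, within the
     16-byte Q-register limit.  */
  unsigned imm_q = (10 * 8) / 8;

  (void) imm_d;
  (void) imm_q;
}
#endif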
   16126 
   16127 static void
   16128 do_neon_rev (void)
   16129 {
   16130   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16131   struct neon_type_el et = neon_check_type (2, rs,
   16132     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16133   unsigned op = (inst.instruction >> 7) & 3;
   16134   /* N (width of reversed regions) is encoded as part of the bitmask. We
   16135      extract it here to check the elements to be reversed are smaller.
   16136      Otherwise we'd get a reserved instruction.  */
   16137   unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
   16138   gas_assert (elsize != 0);
   16139   constraint (et.size >= elsize,
   16140 	      _("elements must be smaller than reversal region"));
   16141   neon_two_same (neon_quad (rs), 1, et.size);
   16142 }
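
/* Illustrative note only -- not part of the original source.  The op field
   decoded above gives the reversal region width (op 0 -> 64, op 1 -> 32,
   op 2 -> 16), so for example "vrev32.8 d0, d1" is accepted (8-bit elements
   inside 32-bit regions) while "vrev32.32 d0, d1" is rejected, because the
   elements must be smaller than the reversal region.  */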
   16143 
   16144 static void
   16145 do_neon_dup (void)
   16146 {
   16147   if (inst.operands[1].isscalar)
   16148     {
   16149       enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
   16150       struct neon_type_el et = neon_check_type (2, rs,
   16151 	N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16152       unsigned sizebits = et.size >> 3;
   16153       unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
   16154       int logsize = neon_logbits (et.size);
   16155       unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
   16156 
   16157       if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
   16158 	return;
   16159 
   16160       NEON_ENCODE (SCALAR, inst);
   16161       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16162       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16163       inst.instruction |= LOW4 (dm);
   16164       inst.instruction |= HI1 (dm) << 5;
   16165       inst.instruction |= neon_quad (rs) << 6;
   16166       inst.instruction |= x << 17;
   16167       inst.instruction |= sizebits << 16;
   16168 
   16169       neon_dp_fixup (&inst);
   16170     }
   16171   else
   16172     {
   16173       enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
   16174       struct neon_type_el et = neon_check_type (2, rs,
   16175 	N_8 | N_16 | N_32 | N_KEY, N_EQK);
   16176       /* Duplicate ARM register to lanes of vector.  */
   16177       NEON_ENCODE (ARMREG, inst);
   16178       switch (et.size)
   16179 	{
   16180 	case 8:  inst.instruction |= 0x400000; break;
   16181 	case 16: inst.instruction |= 0x000020; break;
   16182 	case 32: inst.instruction |= 0x000000; break;
   16183 	default: break;
   16184 	}
   16185       inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
   16186       inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
   16187       inst.instruction |= HI1 (inst.operands[0].reg) << 7;
   16188       inst.instruction |= neon_quad (rs) << 21;
   16189       /* The encoding for this instruction is identical for the ARM and Thumb
   16190 	 variants, except for the condition field.  */
   16191       do_vfp_cond_or_thumb ();
   16192     }
   16193 }
   16194 
   16195 /* VMOV has particularly many variations. It can be one of:
   16196      0. VMOV<c><q> <Qd>, <Qm>
   16197      1. VMOV<c><q> <Dd>, <Dm>
   16198    (Register operations, which are VORR with Rm = Rn.)
   16199      2. VMOV<c><q>.<dt> <Qd>, #<imm>
   16200      3. VMOV<c><q>.<dt> <Dd>, #<imm>
   16201    (Immediate loads.)
   16202      4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   16203    (ARM register to scalar.)
   16204      5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   16205    (Two ARM registers to vector.)
   16206      6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   16207    (Scalar to ARM register.)
   16208      7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   16209    (Vector to two ARM registers.)
   16210      8. VMOV.F32 <Sd>, <Sm>
   16211      9. VMOV.F64 <Dd>, <Dm>
   16212    (VFP register moves.)
   16213     10. VMOV.F32 <Sd>, #imm
   16214     11. VMOV.F64 <Dd>, #imm
   16215    (VFP float immediate load.)
   16216     12. VMOV <Rd>, <Sm>
   16217    (VFP single to ARM reg.)
   16218     13. VMOV <Sd>, <Rm>
   16219    (ARM reg to VFP single.)
   16220     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   16221    (Two ARM regs to two VFP singles.)
   16222     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   16223    (Two VFP singles to two ARM regs.)
   16224 
   16225    These cases can be disambiguated using neon_select_shape, except cases 1/9
   16226    and 3/11 which depend on the operand type too.
   16227 
   16228    All the encoded bits are hardcoded by this function.
   16229 
   16230    Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   16231    Cases 5, 7 may be used with VFPv2 and above.
   16232 
   16233    FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
    16234    can specify a type where it doesn't make sense to, and it is ignored).  */
   16235 
   16236 static void
   16237 do_neon_mov (void)
   16238 {
   16239   enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
   16240 					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
   16241 					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
   16242 					  NS_HR, NS_RH, NS_HI, NS_NULL);
   16243   struct neon_type_el et;
   16244   const char *ldconst = 0;
   16245 
   16246   switch (rs)
   16247     {
   16248     case NS_DD:  /* case 1/9.  */
   16249       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
   16250       /* It is not an error here if no type is given.  */
   16251       inst.error = NULL;
   16252       if (et.type == NT_float && et.size == 64)
   16253 	{
   16254 	  do_vfp_nsyn_opcode ("fcpyd");
   16255 	  break;
   16256 	}
   16257       /* fall through.  */
   16258 
   16259     case NS_QQ:  /* case 0/1.  */
   16260       {
   16261 	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   16262 	  return;
   16263 	/* The architecture manual I have doesn't explicitly state which
   16264 	   value the U bit should have for register->register moves, but
   16265 	   the equivalent VORR instruction has U = 0, so do that.  */
   16266 	inst.instruction = 0x0200110;
   16267 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16268 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16269 	inst.instruction |= LOW4 (inst.operands[1].reg);
   16270 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
   16271 	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16272 	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16273 	inst.instruction |= neon_quad (rs) << 6;
   16274 
   16275 	neon_dp_fixup (&inst);
   16276       }
   16277       break;
   16278 
   16279     case NS_DI:  /* case 3/11.  */
   16280       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
   16281       inst.error = NULL;
   16282       if (et.type == NT_float && et.size == 64)
   16283 	{
   16284 	  /* case 11 (fconstd).  */
   16285 	  ldconst = "fconstd";
   16286 	  goto encode_fconstd;
   16287 	}
   16288       /* fall through.  */
   16289 
   16290     case NS_QI:  /* case 2/3.  */
   16291       if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
   16292 	return;
   16293       inst.instruction = 0x0800010;
   16294       neon_move_immediate ();
   16295       neon_dp_fixup (&inst);
   16296       break;
   16297 
   16298     case NS_SR:  /* case 4.  */
   16299       {
   16300 	unsigned bcdebits = 0;
   16301 	int logsize;
   16302 	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
   16303 	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
   16304 
   16305 	/* .<size> is optional here, defaulting to .32. */
   16306 	if (inst.vectype.elems == 0
   16307 	    && inst.operands[0].vectype.type == NT_invtype
   16308 	    && inst.operands[1].vectype.type == NT_invtype)
   16309 	  {
   16310 	    inst.vectype.el[0].type = NT_untyped;
   16311 	    inst.vectype.el[0].size = 32;
   16312 	    inst.vectype.elems = 1;
   16313 	  }
   16314 
   16315 	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
   16316 	logsize = neon_logbits (et.size);
   16317 
   16318 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
   16319 		    _(BAD_FPU));
   16320 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
   16321 		    && et.size != 32, _(BAD_FPU));
   16322 	constraint (et.type == NT_invtype, _("bad type for scalar"));
   16323 	constraint (x >= 64 / et.size, _("scalar index out of range"));
   16324 
   16325 	switch (et.size)
   16326 	  {
   16327 	  case 8:  bcdebits = 0x8; break;
   16328 	  case 16: bcdebits = 0x1; break;
   16329 	  case 32: bcdebits = 0x0; break;
   16330 	  default: ;
   16331 	  }
   16332 
   16333 	bcdebits |= x << logsize;
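	/* bcdebits now holds the opc1:opc2 value for this size/index pair;
	   e.g. "vmov.8 d0[5], r2" gives 0x8 | 5 = 0xd, i.e. opc1 = 0b11
	   (bits 22-21) and opc2 = 0b01 (bits 6-5).  */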
   16334 
   16335 	inst.instruction = 0xe000b10;
   16336 	do_vfp_cond_or_thumb ();
   16337 	inst.instruction |= LOW4 (dn) << 16;
   16338 	inst.instruction |= HI1 (dn) << 7;
   16339 	inst.instruction |= inst.operands[1].reg << 12;
   16340 	inst.instruction |= (bcdebits & 3) << 5;
   16341 	inst.instruction |= (bcdebits >> 2) << 21;
   16342       }
   16343       break;
   16344 
   16345     case NS_DRR:  /* case 5 (fmdrr).  */
   16346       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
   16347 		  _(BAD_FPU));
   16348 
   16349       inst.instruction = 0xc400b10;
   16350       do_vfp_cond_or_thumb ();
   16351       inst.instruction |= LOW4 (inst.operands[0].reg);
   16352       inst.instruction |= HI1 (inst.operands[0].reg) << 5;
   16353       inst.instruction |= inst.operands[1].reg << 12;
   16354       inst.instruction |= inst.operands[2].reg << 16;
   16355       break;
   16356 
   16357     case NS_RS:  /* case 6.  */
   16358       {
   16359 	unsigned logsize;
   16360 	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
   16361 	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
   16362 	unsigned abcdebits = 0;
   16363 
   16364 	/* .<dt> is optional here, defaulting to .32. */
   16365 	if (inst.vectype.elems == 0
   16366 	    && inst.operands[0].vectype.type == NT_invtype
   16367 	    && inst.operands[1].vectype.type == NT_invtype)
   16368 	  {
   16369 	    inst.vectype.el[0].type = NT_untyped;
   16370 	    inst.vectype.el[0].size = 32;
   16371 	    inst.vectype.elems = 1;
   16372 	  }
   16373 
   16374 	et = neon_check_type (2, NS_NULL,
   16375 			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
   16376 	logsize = neon_logbits (et.size);
   16377 
   16378 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
   16379 		    _(BAD_FPU));
   16380 	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
   16381 		    && et.size != 32, _(BAD_FPU));
   16382 	constraint (et.type == NT_invtype, _("bad type for scalar"));
   16383 	constraint (x >= 64 / et.size, _("scalar index out of range"));
   16384 
   16385 	switch (et.size)
   16386 	  {
   16387 	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
   16388 	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
   16389 	  case 32: abcdebits = 0x00; break;
   16390 	  default: ;
   16391 	  }
   16392 
   16393 	abcdebits |= x << logsize;
   16394 	inst.instruction = 0xe100b10;
   16395 	do_vfp_cond_or_thumb ();
   16396 	inst.instruction |= LOW4 (dn) << 16;
   16397 	inst.instruction |= HI1 (dn) << 7;
   16398 	inst.instruction |= inst.operands[0].reg << 12;
   16399 	inst.instruction |= (abcdebits & 3) << 5;
   16400 	inst.instruction |= (abcdebits >> 2) << 21;
   16401       }
   16402       break;
   16403 
   16404     case NS_RRD:  /* case 7 (fmrrd).  */
   16405       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
   16406 		  _(BAD_FPU));
   16407 
   16408       inst.instruction = 0xc500b10;
   16409       do_vfp_cond_or_thumb ();
   16410       inst.instruction |= inst.operands[0].reg << 12;
   16411       inst.instruction |= inst.operands[1].reg << 16;
   16412       inst.instruction |= LOW4 (inst.operands[2].reg);
   16413       inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16414       break;
   16415 
   16416     case NS_FF:  /* case 8 (fcpys).  */
   16417       do_vfp_nsyn_opcode ("fcpys");
   16418       break;
   16419 
   16420     case NS_HI:
   16421     case NS_FI:  /* case 10 (fconsts).  */
   16422       ldconst = "fconsts";
   16423       encode_fconstd:
   16424       if (is_quarter_float (inst.operands[1].imm))
   16425 	{
   16426 	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
   16427 	  do_vfp_nsyn_opcode (ldconst);
   16428 
   16429 	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
   16430 	  if (rs == NS_HI)
   16431 	    do_scalar_fp16_v82_encode ();
   16432 	}
   16433       else
   16434 	first_error (_("immediate out of range"));
   16435       break;
   16436 
   16437     case NS_RH:
   16438     case NS_RF:  /* case 12 (fmrs).  */
   16439       do_vfp_nsyn_opcode ("fmrs");
   16440       /* ARMv8.2 fp16 vmov.f16 instruction.  */
   16441       if (rs == NS_RH)
   16442 	do_scalar_fp16_v82_encode ();
   16443       break;
   16444 
   16445     case NS_HR:
   16446     case NS_FR:  /* case 13 (fmsr).  */
   16447       do_vfp_nsyn_opcode ("fmsr");
   16448       /* ARMv8.2 fp16 vmov.f16 instruction.  */
   16449       if (rs == NS_HR)
   16450 	do_scalar_fp16_v82_encode ();
   16451       break;
   16452 
   16453     /* The encoders for the fmrrs and fmsrr instructions expect three operands
   16454        (one of which is a list), but we have parsed four.  Do some fiddling to
   16455        make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
   16456        expect.  */
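    /* For example, "vmov r2, r3, s4, s5" arrives here as four operands; after
       the fix-up below, operands[2] describes a two-register list starting at
       s4 and operands[3] is cleared.  */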
   16457     case NS_RRFF:  /* case 14 (fmrrs).  */
   16458       constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
   16459 		  _("VFP registers must be adjacent"));
   16460       inst.operands[2].imm = 2;
   16461       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
   16462       do_vfp_nsyn_opcode ("fmrrs");
   16463       break;
   16464 
   16465     case NS_FFRR:  /* case 15 (fmsrr).  */
   16466       constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
   16467 		  _("VFP registers must be adjacent"));
   16468       inst.operands[1] = inst.operands[2];
   16469       inst.operands[2] = inst.operands[3];
   16470       inst.operands[0].imm = 2;
   16471       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
   16472       do_vfp_nsyn_opcode ("fmsrr");
   16473       break;
   16474 
   16475     case NS_NULL:
   16476       /* neon_select_shape has determined that the instruction
   16477 	 shape is wrong and has already set the error message.  */
   16478       break;
   16479 
   16480     default:
   16481       abort ();
   16482     }
   16483 }
   16484 
   16485 static void
   16486 do_neon_rshift_round_imm (void)
   16487 {
   16488   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
   16489   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
   16490   int imm = inst.operands[2].imm;
   16491 
   16492   /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
   16493   if (imm == 0)
   16494     {
   16495       inst.operands[2].present = 0;
   16496       do_neon_mov ();
   16497       return;
   16498     }
   16499 
   16500   constraint (imm < 1 || (unsigned)imm > et.size,
   16501 	      _("immediate out of range for shift"));
   16502   neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
   16503 		  et.size - imm);
   16504 }
   16505 
   16506 static void
   16507 do_neon_movhf (void)
   16508 {
   16509   enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
   16510   constraint (rs != NS_HH, _("invalid suffix"));
   16511 
   16512   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   16513 	      _(BAD_FPU));
   16514 
   16515   do_vfp_sp_monadic ();
   16516 
   16517   inst.is_neon = 1;
   16518   inst.instruction |= 0xf0000000;
   16519 }
   16520 
   16521 static void
   16522 do_neon_movl (void)
   16523 {
   16524   struct neon_type_el et = neon_check_type (2, NS_QD,
   16525     N_EQK | N_DBL, N_SU_32 | N_KEY);
   16526   unsigned sizebits = et.size >> 3;
   16527   inst.instruction |= sizebits << 19;
   16528   neon_two_same (0, et.type == NT_unsigned, -1);
   16529 }
   16530 
   16531 static void
   16532 do_neon_trn (void)
   16533 {
   16534   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16535   struct neon_type_el et = neon_check_type (2, rs,
   16536     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16537   NEON_ENCODE (INTEGER, inst);
   16538   neon_two_same (neon_quad (rs), 1, et.size);
   16539 }
   16540 
   16541 static void
   16542 do_neon_zip_uzp (void)
   16543 {
   16544   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16545   struct neon_type_el et = neon_check_type (2, rs,
   16546     N_EQK, N_8 | N_16 | N_32 | N_KEY);
   16547   if (rs == NS_DD && et.size == 32)
   16548     {
   16549       /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
   16550       inst.instruction = N_MNEM_vtrn;
   16551       do_neon_trn ();
   16552       return;
   16553     }
   16554   neon_two_same (neon_quad (rs), 1, et.size);
   16555 }
   16556 
   16557 static void
   16558 do_neon_sat_abs_neg (void)
   16559 {
   16560   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16561   struct neon_type_el et = neon_check_type (2, rs,
   16562     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
   16563   neon_two_same (neon_quad (rs), 1, et.size);
   16564 }
   16565 
   16566 static void
   16567 do_neon_pair_long (void)
   16568 {
   16569   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16570   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
    16571 	  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
   16572   inst.instruction |= (et.type == NT_unsigned) << 7;
   16573   neon_two_same (neon_quad (rs), 1, et.size);
   16574 }
   16575 
   16576 static void
   16577 do_neon_recip_est (void)
   16578 {
   16579   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16580   struct neon_type_el et = neon_check_type (2, rs,
   16581     N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
   16582   inst.instruction |= (et.type == NT_float) << 8;
   16583   neon_two_same (neon_quad (rs), 1, et.size);
   16584 }
   16585 
   16586 static void
   16587 do_neon_cls (void)
   16588 {
   16589   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16590   struct neon_type_el et = neon_check_type (2, rs,
   16591     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
   16592   neon_two_same (neon_quad (rs), 1, et.size);
   16593 }
   16594 
   16595 static void
   16596 do_neon_clz (void)
   16597 {
   16598   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16599   struct neon_type_el et = neon_check_type (2, rs,
   16600     N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
   16601   neon_two_same (neon_quad (rs), 1, et.size);
   16602 }
   16603 
   16604 static void
   16605 do_neon_cnt (void)
   16606 {
   16607   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16608   struct neon_type_el et = neon_check_type (2, rs,
   16609     N_EQK | N_INT, N_8 | N_KEY);
   16610   neon_two_same (neon_quad (rs), 1, et.size);
   16611 }
   16612 
   16613 static void
   16614 do_neon_swp (void)
   16615 {
   16616   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
   16617   neon_two_same (neon_quad (rs), 1, -1);
   16618 }
   16619 
   16620 static void
   16621 do_neon_tbl_tbx (void)
   16622 {
   16623   unsigned listlenbits;
   16624   neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
   16625 
   16626   if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
   16627     {
   16628       first_error (_("bad list length for table lookup"));
   16629       return;
   16630     }
   16631 
   16632   listlenbits = inst.operands[1].imm - 1;
   16633   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   16634   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   16635   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
   16636   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
   16637   inst.instruction |= LOW4 (inst.operands[2].reg);
   16638   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
   16639   inst.instruction |= listlenbits << 8;
   16640 
   16641   neon_dp_fixup (&inst);
   16642 }
   16643 
   16644 static void
   16645 do_neon_ldm_stm (void)
   16646 {
   16647   /* P, U and L bits are part of bitmask.  */
   16648   int is_dbmode = (inst.instruction & (1 << 24)) != 0;
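  /* For double-precision registers the offset field of VLDM/VSTM counts
     32-bit words, so each D register contributes two.  */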
   16649   unsigned offsetbits = inst.operands[1].imm * 2;
   16650 
   16651   if (inst.operands[1].issingle)
   16652     {
   16653       do_vfp_nsyn_ldm_stm (is_dbmode);
   16654       return;
   16655     }
   16656 
   16657   constraint (is_dbmode && !inst.operands[0].writeback,
   16658 	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
   16659 
   16660   constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
   16661 	      _("register list must contain at least 1 and at most 16 "
   16662 		"registers"));
   16663 
   16664   inst.instruction |= inst.operands[0].reg << 16;
   16665   inst.instruction |= inst.operands[0].writeback << 21;
   16666   inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
   16667   inst.instruction |= HI1 (inst.operands[1].reg) << 22;
   16668 
   16669   inst.instruction |= offsetbits;
   16670 
   16671   do_vfp_cond_or_thumb ();
   16672 }
   16673 
   16674 static void
   16675 do_neon_ldr_str (void)
   16676 {
   16677   int is_ldr = (inst.instruction & (1 << 20)) != 0;
   16678 
    16679   /* Use of PC in vstr in ARM mode is deprecated in ARMv7,
    16680      and is UNPREDICTABLE in Thumb mode.  */
   16681   if (!is_ldr
   16682       && inst.operands[1].reg == REG_PC
   16683       && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
   16684     {
   16685       if (thumb_mode)
   16686 	inst.error = _("Use of PC here is UNPREDICTABLE");
   16687       else if (warn_on_deprecated)
   16688 	as_tsktsk (_("Use of PC here is deprecated"));
   16689     }
   16690 
   16691   if (inst.operands[0].issingle)
   16692     {
   16693       if (is_ldr)
   16694 	do_vfp_nsyn_opcode ("flds");
   16695       else
   16696 	do_vfp_nsyn_opcode ("fsts");
   16697 
   16698       /* ARMv8.2 vldr.16/vstr.16 instruction.  */
   16699       if (inst.vectype.el[0].size == 16)
   16700 	do_scalar_fp16_v82_encode ();
   16701     }
   16702   else
   16703     {
   16704       if (is_ldr)
   16705 	do_vfp_nsyn_opcode ("fldd");
   16706       else
   16707 	do_vfp_nsyn_opcode ("fstd");
   16708     }
   16709 }
   16710 
   16711 /* "interleave" version also handles non-interleaving register VLD1/VST1
   16712    instructions.  */
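/* For example, plain "vld1.32 {d0-d3}, [r0]" and "vst1.8 {d0}, [r1]" are
   encoded here, as are the genuinely interleaving VLD2/VLD3/VLD4 forms.  */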
   16713 
   16714 static void
   16715 do_neon_ld_st_interleave (void)
   16716 {
   16717   struct neon_type_el et = neon_check_type (1, NS_NULL,
   16718 					    N_8 | N_16 | N_32 | N_64);
   16719   unsigned alignbits = 0;
   16720   unsigned idx;
   16721   /* The bits in this table go:
   16722      0: register stride of one (0) or two (1)
   16723      1,2: register list length, minus one (1, 2, 3, 4).
   16724      3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
   16725      We use -1 for invalid entries.  */
   16726   const int typetable[] =
   16727     {
   16728       0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
   16729        -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
   16730        -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
   16731        -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
   16732     };
   16733   int typebits;
   16734 
   16735   if (et.type == NT_invtype)
   16736     return;
   16737 
   16738   if (inst.operands[1].immisalign)
   16739     switch (inst.operands[1].imm >> 8)
   16740       {
   16741       case 64: alignbits = 1; break;
   16742       case 128:
   16743 	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
   16744 	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
   16745 	  goto bad_alignment;
   16746 	alignbits = 2;
   16747 	break;
   16748       case 256:
   16749 	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
   16750 	  goto bad_alignment;
   16751 	alignbits = 3;
   16752 	break;
   16753       default:
   16754       bad_alignment:
   16755 	first_error (_("bad alignment"));
   16756 	return;
   16757       }
   16758 
   16759   inst.instruction |= alignbits << 4;
   16760   inst.instruction |= neon_logbits (et.size) << 6;
   16761 
    16762   /* Bits [4:6] of the immediate in a list specifier encode register stride
    16763      (minus 1) in bit 4, and list length (minus 1) in bits [5:6].  We put <n>
    16764      minus 1 for VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it
    16765      out here, look up the right value for "type" in a table based on this
    16766      value and the given list style, then stick it back.  */
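  /* Worked example: for "vld2.16 {d0, d2}, [r0]" the list has length 2 and
     register stride 2, so (imm >> 4) & 7 is 0b011; the VLD2/VST2 row is
     selected by bits [9:8] (here 1), giving idx = 11 and typebits = 0x9.  */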
   16767   idx = ((inst.operands[0].imm >> 4) & 7)
   16768 	| (((inst.instruction >> 8) & 3) << 3);
   16769 
   16770   typebits = typetable[idx];
   16771 
   16772   constraint (typebits == -1, _("bad list type for instruction"));
   16773   constraint (((inst.instruction >> 8) & 3) && et.size == 64,
   16774 	      _("bad element type for instruction"));
   16775 
   16776   inst.instruction &= ~0xf00;
   16777   inst.instruction |= typebits << 8;
   16778 }
   16779 
   16780 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
    16781    *DO_ALIGNMENT is set to 1 if the relevant alignment bit should be set, 0
   16782    otherwise. The variable arguments are a list of pairs of legal (size, align)
   16783    values, terminated with -1.  */
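/* For example, the VLD1/VST1 single-lane case below calls
     neon_alignment_bit (et.size, align, &do_alignment, 16, 16, 32, 32, -1);
   i.e. an explicit alignment of 16 is legal for 16-bit elements and 32 for
   32-bit ones; any other explicit alignment is rejected.  */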
   16784 
   16785 static int
   16786 neon_alignment_bit (int size, int align, int *do_alignment, ...)
   16787 {
   16788   va_list ap;
   16789   int result = FAIL, thissize, thisalign;
   16790 
   16791   if (!inst.operands[1].immisalign)
   16792     {
   16793       *do_alignment = 0;
   16794       return SUCCESS;
   16795     }
   16796 
   16797   va_start (ap, do_alignment);
   16798 
   16799   do
   16800     {
   16801       thissize = va_arg (ap, int);
   16802       if (thissize == -1)
   16803 	break;
   16804       thisalign = va_arg (ap, int);
   16805 
   16806       if (size == thissize && align == thisalign)
   16807 	result = SUCCESS;
   16808     }
   16809   while (result != SUCCESS);
   16810 
   16811   va_end (ap);
   16812 
   16813   if (result == SUCCESS)
   16814     *do_alignment = 1;
   16815   else
   16816     first_error (_("unsupported alignment for instruction"));
   16817 
   16818   return result;
   16819 }
   16820 
   16821 static void
   16822 do_neon_ld_st_lane (void)
   16823 {
   16824   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
   16825   int align_good, do_alignment = 0;
   16826   int logsize = neon_logbits (et.size);
   16827   int align = inst.operands[1].imm >> 8;
   16828   int n = (inst.instruction >> 8) & 3;
   16829   int max_el = 64 / et.size;
   16830 
   16831   if (et.type == NT_invtype)
   16832     return;
   16833 
   16834   constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
   16835 	      _("bad list length"));
   16836   constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
   16837 	      _("scalar index out of range"));
   16838   constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
   16839 	      && et.size == 8,
   16840 	      _("stride of 2 unavailable when element size is 8"));
   16841 
   16842   switch (n)
   16843     {
   16844     case 0:  /* VLD1 / VST1.  */
   16845       align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
   16846 				       32, 32, -1);
   16847       if (align_good == FAIL)
   16848 	return;
   16849       if (do_alignment)
   16850 	{
   16851 	  unsigned alignbits = 0;
   16852 	  switch (et.size)
   16853 	    {
   16854 	    case 16: alignbits = 0x1; break;
   16855 	    case 32: alignbits = 0x3; break;
   16856 	    default: ;
   16857 	    }
   16858 	  inst.instruction |= alignbits << 4;
   16859 	}
   16860       break;
   16861 
   16862     case 1:  /* VLD2 / VST2.  */
   16863       align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
   16864 		      16, 32, 32, 64, -1);
   16865       if (align_good == FAIL)
   16866 	return;
   16867       if (do_alignment)
   16868 	inst.instruction |= 1 << 4;
   16869       break;
   16870 
   16871     case 2:  /* VLD3 / VST3.  */
   16872       constraint (inst.operands[1].immisalign,
   16873 		  _("can't use alignment with this instruction"));
   16874       break;
   16875 
   16876     case 3:  /* VLD4 / VST4.  */
   16877       align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
   16878 				       16, 64, 32, 64, 32, 128, -1);
   16879       if (align_good == FAIL)
   16880 	return;
   16881       if (do_alignment)
   16882 	{
   16883 	  unsigned alignbits = 0;
   16884 	  switch (et.size)
   16885 	    {
   16886 	    case 8:  alignbits = 0x1; break;
   16887 	    case 16: alignbits = 0x1; break;
   16888 	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
   16889 	    default: ;
   16890 	    }
   16891 	  inst.instruction |= alignbits << 4;
   16892 	}
   16893       break;
   16894 
   16895     default: ;
   16896     }
   16897 
   16898   /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
   16899   if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16900     inst.instruction |= 1 << (4 + logsize);
   16901 
   16902   inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
   16903   inst.instruction |= logsize << 10;
   16904 }
   16905 
   16906 /* Encode single n-element structure to all lanes VLD<n> instructions.  */
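/* For example, "vld1.8 {d0[]}, [r0]" and "vld2.16 {d0[], d1[]}, [r1]" take
   this path.  */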
   16907 
   16908 static void
   16909 do_neon_ld_dup (void)
   16910 {
   16911   struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
   16912   int align_good, do_alignment = 0;
   16913 
   16914   if (et.type == NT_invtype)
   16915     return;
   16916 
   16917   switch ((inst.instruction >> 8) & 3)
   16918     {
   16919     case 0:  /* VLD1.  */
   16920       gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
   16921       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
   16922 				       &do_alignment, 16, 16, 32, 32, -1);
   16923       if (align_good == FAIL)
   16924 	return;
   16925       switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
   16926 	{
   16927 	case 1: break;
   16928 	case 2: inst.instruction |= 1 << 5; break;
   16929 	default: first_error (_("bad list length")); return;
   16930 	}
   16931       inst.instruction |= neon_logbits (et.size) << 6;
   16932       break;
   16933 
   16934     case 1:  /* VLD2.  */
   16935       align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
   16936 				       &do_alignment, 8, 16, 16, 32, 32, 64,
   16937 				       -1);
   16938       if (align_good == FAIL)
   16939 	return;
   16940       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
   16941 		  _("bad list length"));
   16942       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16943 	inst.instruction |= 1 << 5;
   16944       inst.instruction |= neon_logbits (et.size) << 6;
   16945       break;
   16946 
   16947     case 2:  /* VLD3.  */
   16948       constraint (inst.operands[1].immisalign,
   16949 		  _("can't use alignment with this instruction"));
   16950       constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
   16951 		  _("bad list length"));
   16952       if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16953 	inst.instruction |= 1 << 5;
   16954       inst.instruction |= neon_logbits (et.size) << 6;
   16955       break;
   16956 
   16957     case 3:  /* VLD4.  */
   16958       {
   16959 	int align = inst.operands[1].imm >> 8;
   16960 	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
   16961 					 16, 64, 32, 64, 32, 128, -1);
   16962 	if (align_good == FAIL)
   16963 	  return;
   16964 	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
   16965 		    _("bad list length"));
   16966 	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
   16967 	  inst.instruction |= 1 << 5;
   16968 	if (et.size == 32 && align == 128)
   16969 	  inst.instruction |= 0x3 << 6;
   16970 	else
   16971 	  inst.instruction |= neon_logbits (et.size) << 6;
   16972       }
   16973       break;
   16974 
   16975     default: ;
   16976     }
   16977 
   16978   inst.instruction |= do_alignment << 4;
   16979 }
   16980 
   16981 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
    16982    apart from bits [11:4]).  */
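/* Illustrative dispatch:
     vld4.16 {d0-d3}, [r0]		-> do_neon_ld_st_interleave
     vld2.8  {d0[], d1[]}, [r0]		-> do_neon_ld_dup
     vst1.32 {d0[1]}, [r0]		-> do_neon_ld_st_lane  */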
   16983 
   16984 static void
   16985 do_neon_ldx_stx (void)
   16986 {
   16987   if (inst.operands[1].isreg)
   16988     constraint (inst.operands[1].reg == REG_PC, BAD_PC);
   16989 
   16990   switch (NEON_LANE (inst.operands[0].imm))
   16991     {
   16992     case NEON_INTERLEAVE_LANES:
   16993       NEON_ENCODE (INTERLV, inst);
   16994       do_neon_ld_st_interleave ();
   16995       break;
   16996 
   16997     case NEON_ALL_LANES:
   16998       NEON_ENCODE (DUP, inst);
   16999       if (inst.instruction == N_INV)
   17000 	{
    17001 	  first_error (_("only loads support such operands"));
   17002 	  break;
   17003 	}
   17004       do_neon_ld_dup ();
   17005       break;
   17006 
   17007     default:
   17008       NEON_ENCODE (LANE, inst);
   17009       do_neon_ld_st_lane ();
   17010     }
   17011 
   17012   /* L bit comes from bit mask.  */
   17013   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
   17014   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
   17015   inst.instruction |= inst.operands[1].reg << 16;
   17016 
   17017   if (inst.operands[1].postind)
   17018     {
   17019       int postreg = inst.operands[1].imm & 0xf;
   17020       constraint (!inst.operands[1].immisreg,
   17021 		  _("post-index must be a register"));
   17022       constraint (postreg == 0xd || postreg == 0xf,
   17023 		  _("bad register for post-index"));
   17024       inst.instruction |= postreg;
   17025     }
   17026   else
   17027     {
   17028       constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
   17029       constraint (inst.reloc.exp.X_op != O_constant
   17030 		  || inst.reloc.exp.X_add_number != 0,
   17031 		  BAD_ADDR_MODE);
   17032 
   17033       if (inst.operands[1].writeback)
   17034 	{
   17035 	  inst.instruction |= 0xd;
   17036 	}
   17037       else
   17038 	inst.instruction |= 0xf;
   17039     }
   17040 
   17041   if (thumb_mode)
   17042     inst.instruction |= 0xf9000000;
   17043   else
   17044     inst.instruction |= 0xf4000000;
   17045 }
   17046 
   17047 /* FP v8.  */
   17048 static void
   17049 do_vfp_nsyn_fpv8 (enum neon_shape rs)
   17050 {
   17051   /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
   17052      D register operands.  */
   17053   if (neon_shape_class[rs] == SC_DOUBLE)
   17054     constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
   17055 		_(BAD_FPU));
   17056 
   17057   NEON_ENCODE (FPV8, inst);
   17058 
   17059   if (rs == NS_FFF || rs == NS_HHH)
   17060     {
   17061       do_vfp_sp_dyadic ();
   17062 
   17063       /* ARMv8.2 fp16 instruction.  */
   17064       if (rs == NS_HHH)
   17065 	do_scalar_fp16_v82_encode ();
   17066     }
   17067   else
   17068     do_vfp_dp_rd_rn_rm ();
   17069 
   17070   if (rs == NS_DDD)
   17071     inst.instruction |= 0x100;
   17072 
   17073   inst.instruction |= 0xf0000000;
   17074 }
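/* Mnemonics reaching this encoder through try_vfp_nsyn below include, for
   example, "vselge.f32 s0, s1, s2" and "vmaxnm.f64 d0, d1, d2".  */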
   17075 
   17076 static void
   17077 do_vsel (void)
   17078 {
   17079   set_it_insn_type (OUTSIDE_IT_INSN);
   17080 
   17081   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
   17082     first_error (_("invalid instruction shape"));
   17083 }
   17084 
   17085 static void
   17086 do_vmaxnm (void)
   17087 {
   17088   set_it_insn_type (OUTSIDE_IT_INSN);
   17089 
   17090   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
   17091     return;
   17092 
   17093   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
   17094     return;
   17095 
   17096   neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
   17097 }
   17098 
   17099 static void
   17100 do_vrint_1 (enum neon_cvt_mode mode)
   17101 {
   17102   enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
   17103   struct